/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_SKBS 32
#define MAX_UDP_CHUNK 1460

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;
static DEFINE_SPINLOCK(rx_list_lock);
static LIST_HEAD(rx_list);

static atomic_t trapped;
static DEFINE_SPINLOCK(netpoll_poll_lock);

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))
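/*
 * Worked out with the fixed header sizes used here (no IP options):
 * 1460 bytes of UDP payload + 8 (struct udphdr) + 20 (struct iphdr)
 * + 14 (struct ethhdr) = 1502 bytes, i.e. one full Ethernet frame of
 * payload plus headers per message.
 */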
static void zap_completion_queue(void);

static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			unsigned short ulen, u32 saddr, u32 daddr)
{
	/* a zero UDP checksum means the sender did not compute one */
	if (uh->check == 0)
		return 0;

	/*
	 * With CHECKSUM_HW the device already summed the payload and
	 * skb->csum holds that partial sum, so only the pseudo-header
	 * needs to be folded in; otherwise compute it all in software.
	 */
	if (skb->ip_summed == CHECKSUM_HW)
		return csum_tcpudp_magic(
			saddr, daddr, ulen, IPPROTO_UDP, skb->csum);

	skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}
/*
 * Check whether delayed processing was scheduled for our current CPU,
 * and then manually invoke NAPI polling to pump data off the card.
 *
 * In cases of bi-directional communication, reading only one message
 * at a time can lead to packets being dropped by the network adapter,
 * forcing superfluous retries and possibly timeouts. Thus, we set our
 * budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	int budget = 16;
	unsigned long flags;
	struct softnet_data *queue;

	spin_lock_irqsave(&netpoll_poll_lock, flags);
	queue = &__get_cpu_var(softnet_data);
	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    !list_empty(&queue->poll_list)) {
		np->dev->netpoll_rx |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
	}
	spin_unlock_irqrestore(&netpoll_poll_lock, flags);
}
void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	zap_completion_queue();
}
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_list_lock, flags);
	while (nr_skbs < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		skb->next = skbs;
		skbs = skb;
		nr_skbs++;
	}
	spin_unlock_irqrestore(&skb_list_lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

	zap_completion_queue();
repeat:
	if (nr_skbs < MAX_SKBS)
		refill_skbs();

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		/* allocation failed; fall back to the preallocated pool */
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		/* spin, polling the device, until an skb frees up */
		count++;
		if (once && (count == 1000000)) {
			printk("out of netpoll skbs!\n");
			once = 0;
		}
		netpoll_poll(np);
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status;

repeat:
	if (!np || !np->dev || !netif_running(np->dev)) {
		__kfree_skb(skb);
		return;
	}

	spin_lock(&np->dev->xmit_lock);
	np->dev->xmit_lock_owner = smp_processor_id();

	/*
	 * network drivers do not expect to be called if the queue is
	 * stopped.
	 */
	if (netif_queue_stopped(np->dev)) {
		np->dev->xmit_lock_owner = -1;
		spin_unlock(&np->dev->xmit_lock);

		netpoll_poll(np);
		goto repeat;
	}

	status = np->dev->hard_start_xmit(skb, np->dev);
	np->dev->xmit_lock_owner = -1;
	spin_unlock(&np->dev->xmit_lock);

	/* transmit busy */
	if (status) {
		netpoll_poll(np);
		goto repeat;
	}
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;

	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	netpoll_send_skb(np, skb);
}
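/*
 * netpoll_send_udp() builds the frame back to front: the payload is
 * copied first, then each header is prepended with skb_push(), giving
 *
 *	| ethhdr (14) | iphdr (20) | udphdr (8) | payload (len) |
 *
 * find_skb()'s reserve of (total_len - len) is exactly the headroom
 * those three skb_push() calls consume.
 */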
static void arp_reply(struct sk_buff *skb)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	u32 sip, tip;
	struct sk_buff *send_skb;
	unsigned long flags;
	struct list_head *p;
	struct netpoll *np = NULL;

	/* find the netpoll client listening on this device, if any */
	spin_lock_irqsave(&rx_list_lock, flags);
	list_for_each(p, &rx_list) {
		np = list_entry(p, struct netpoll, rx_list);
		if (np->dev == skb->dev)
			break;
		np = NULL;
	}
	spin_unlock_irqrestore(&rx_list_lock, flags);

	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1) + skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));
	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 np->remote_mac, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	/* sender: our hardware address and the IP that was asked about */
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	/* target: the requester's hardware and IP addresses */
	memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
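/*
 * The reply body written above follows the standard ARP layout:
 *
 *	| arphdr | sender hw | sender ip | target hw | target ip |
 *
 * with the sender/target IPs swapped relative to the request: tip was
 * the address being resolved (ours), sip is where the reply goes back.
 */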
int netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np;
	struct list_head *p;
	unsigned long flags;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		arp_reply(skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
		goto out;

	spin_lock_irqsave(&rx_list_lock, flags);
	list_for_each(p, &rx_list) {
		np = list_entry(p, struct netpoll, rx_list);
		if (np->dev && np->dev != skb->dev)
			continue;
		if (np->local_ip && np->local_ip != ntohl(iph->daddr))
			continue;
		if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		spin_unlock_irqrestore(&rx_list_lock, flags);

		if (np->rx_hook)
			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh + 1),
				    ulen - sizeof(struct udphdr));

		return 1;
	}
	spin_unlock_irqrestore(&rx_list_lock, flags);

out:
	return atomic_read(&trapped);
}
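/*
 * A minimal sketch of a receiving client; the hook is invoked from
 * netpoll_rx() above with the matched UDP payload. The names here
 * (example_rx, example_np) are hypothetical, not part of this file:
 *
 *	static void example_rx(struct netpoll *np, int port,
 *			       char *msg, int len)
 *	{
 *		printk(KERN_INFO "%s: %d bytes from port %d\n",
 *		       np->name, len, port);
 *	}
 *
 *	static struct netpoll example_np = {
 *		.name	 = "example",
 *		.rx_hook = example_rx,
 *	};
 */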
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
	       np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
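/*
 * The option string parsed above has the form
 *
 *	[local-port]@[local-ip]/[dev],[remote-port]@<remote-ip>/[remote-mac]
 *
 * where any bracketed field may be left empty to keep its default, e.g.
 * (an illustrative value, not a tested configuration):
 *
 *	6665@192.168.0.2/eth1,6666@192.168.0.1/00:0d:56:3e:2f:0a
 */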
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -1;
	}
	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned short oflags;
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		oflags = ndev->flags;

		rtnl_shlock();
		if (dev_change_flags(ndev, oflags | IFF_UP) < 0) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, np->dev_name);
			rtnl_shunlock();
			goto release;
		}
		rtnl_shunlock();

		atleast = jiffies + HZ/10;
		atmost = jiffies + 10*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/*
		 * If carrier appears to come up instantly, don't trust
		 * it; give the link time to settle instead.
		 */
		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears flaky,"
			       " waiting 10 seconds\n",
			       np->name);
			while (time_before(jiffies, atmost))
				cond_resched();
		}
	}

	if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	np->dev = ndev;

	if (np->rx_hook) {
		unsigned long flags;

		np->dev->netpoll_rx = NETPOLL_RX_ENABLED;

		spin_lock_irqsave(&rx_list_lock, flags);
		list_add(&np->rx_list, &rx_list);
		spin_unlock_irqrestore(&rx_list_lock, flags);
	}

	return 0;

release:
	dev_put(ndev);
	return -1;
}
void netpoll_cleanup(struct netpoll *np)
{
	if (np->rx_hook) {
		unsigned long flags;

		spin_lock_irqsave(&rx_list_lock, flags);
		list_del(&np->rx_list);
		spin_unlock_irqrestore(&rx_list_lock, flags);
	}

	if (np->dev)
		np->dev->netpoll_rx = 0;
	np->dev = NULL;
}
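/*
 * A minimal sketch of the full client lifecycle, reusing the
 * hypothetical example_np from the sketch after netpoll_rx() above:
 *
 *	static int __init example_init(void)
 *	{
 *		char config[] =
 *			"6665@192.168.0.2/eth1,6666@192.168.0.1/00:0d:56:3e:2f:0a";
 *
 *		if (netpoll_parse_options(&example_np, config))
 *			return -EINVAL;
 *		if (netpoll_setup(&example_np))
 *			return -EINVAL;
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		netpoll_cleanup(&example_np);
 *	}
 */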
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
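/*
 * While the trap count is raised, netpoll_rx() reports every packet as
 * consumed, so the regular stack never sees traffic on the polled
 * interface. A debugger-style client would bracket its exclusive use
 * of the wire with netpoll_set_trap(1) / netpoll_set_trap(0).
 */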
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);