#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
+#include <linux/nmi.h>
#include <net/tcp.h>
#include <net/udp.h>
#define MAX_SKBS 32
#define MAX_UDP_CHUNK 1460
+#define NETPOLL_RX_ENABLED 1
+#define NETPOLL_RX_DROP 2
+
static spinlock_t skb_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_skbs;
static struct sk_buff *skbs;
static int trapped;
+extern void (*netdump_func) (struct pt_regs *regs);
+
#define MAX_SKB_SIZE \
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
sizeof(struct iphdr) + sizeof(struct ethhdr))
void netpoll_poll(struct netpoll *np)
{
- int budget = 1;
+ int budget = netdump_mode ? 64 : 16;
if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
return;
np->dev->poll_controller(np->dev);
/* If scheduling is stopped, tickle NAPI bits */
- if(trapped && np->dev->poll &&
- test_bit(__LINK_STATE_RX_SCHED, &np->dev->state))
- np->dev->poll(np->dev, &budget);
+ if (np->dev->poll &&
+ test_bit(__LINK_STATE_RX_SCHED, &np->dev->state)) {
+ np->dev->netpoll_rx |= NETPOLL_RX_DROP;
+ if (trapped) {
+ np->dev->poll(np->dev, &budget);
+ } else {
+ trapped = 1;
+ np->dev->poll(np->dev, &budget);
+ trapped = 0;
+ }
+ np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
+ }
+
zap_completion_queue();
}
}
put_cpu_var(softnet_data);
+ touch_nmi_watchdog();
}
static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
struct list_head *p;
unsigned long flags;
+ if (!(skb->dev->netpoll_rx & NETPOLL_RX_ENABLED))
+ return 1;
+
if (skb->dev->type != ARPHRD_ETHER)
goto out;
if(np->rx_hook) {
unsigned long flags;
-#ifdef CONFIG_NETPOLL_RX
- np->dev->netpoll_rx = 1;
-#endif
+ np->dev->netpoll_rx = NETPOLL_RX_ENABLED;
spin_lock_irqsave(&rx_list_lock, flags);
list_add(&np->rx_list, &rx_list);
spin_unlock_irqrestore(&rx_list_lock, flags);
}
+ if(np->dump_func)
+ netdump_func = np->dump_func;
+
return 0;
release:
dev_put(ndev);
spin_lock_irqsave(&rx_list_lock, flags);
list_del(&np->rx_list);
-#ifdef CONFIG_NETPOLL_RX
np->dev->netpoll_rx = 0;
-#endif
spin_unlock_irqrestore(&rx_list_lock, flags);
}
trapped = trap;
}
+/*
+ * netpoll_reset_locks - forcibly re-initialize the spinlocks netpoll uses.
+ * @np: netpoll instance whose device's xmit_lock is also reset.
+ *
+ * Re-initializes rx_list_lock, skb_list_lock and np->dev->xmit_lock
+ * unconditionally, even if they are currently held.
+ *
+ * NOTE(review): clobbering possibly-held locks is only safe on the
+ * crash/netdump path where all other CPUs have been stopped -- confirm
+ * every caller guarantees that. Also assumes np->dev is non-NULL here
+ * (unlike netpoll_poll, no check is made) -- verify against callers.
+ */
+void netpoll_reset_locks(struct netpoll *np)
+{
+ spin_lock_init(&rx_list_lock);
+ spin_lock_init(&skb_list_lock);
+ spin_lock_init(&np->dev->xmit_lock);
+}
+
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
+EXPORT_SYMBOL_GPL(netpoll_reset_locks);