#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
-
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
+static struct sk_buff_head skb_pool;
static atomic_t trapped;
static void queue_process(void *p)
{
- unsigned long flags;
+ struct netpoll_info *npinfo = p;
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
-
- spin_unlock_irqrestore(&queue_lock, flags);
-
+ while ((skb = skb_dequeue(&npinfo->txq)))
dev_queue_xmit(skb);
- }
-}
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
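The new queue_process() and netpoll_queue() rely on per-device state that lives in struct netpoll_info; the matching header change is not part of this excerpt. A rough sketch of the fields the code assumes, reconstructed only from the uses in this file (the real struct carries further members, e.g. the poll lock, which are unchanged):

	struct netpoll_info {
		atomic_t refcnt;		/* one reference per attached netpoll client */
		int rx_flags;
		spinlock_t rx_lock;
		struct netpoll *rx_np;		/* client that registered an rx hook */
		int tries;
		struct sk_buff_head arp_tx;	/* queued ARP requests awaiting replies */
		struct sk_buff_head txq;	/* deferred tx skbs, drained by queue_process() */
		struct work_struct tx_work;	/* work item that runs queue_process() */
		/* ... existing members (poll_lock, poll_owner, ...) unchanged ... */
	};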
void netpoll_queue(struct sk_buff *skb)
{
- unsigned long flags;
+ struct net_device *dev = skb->dev;
+ struct netpoll_info *npinfo = dev->npinfo;
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
+ if (!npinfo)
+ kfree_skb(skb);
+ else {
+ skb_queue_tail(&npinfo->txq, skb);
+ schedule_work(&npinfo->tx_work);
}
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
-
- schedule_work(&send_queue);
}
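netpoll_queue() is intended to be callable from any context, including hard IRQ: skb_queue_tail() takes the queue's internal lock with interrupts disabled, and schedule_work() on an already-pending work item is a no-op, so repeated calls simply batch more packets for the next queue_process() run. A hypothetical caller, purely for illustration (example_defer_tx is not part of the patch):

	/* Defer a packet that cannot be transmitted right now, e.g. because we
	 * are in IRQ context or the driver's tx path is busy.  netpoll_queue()
	 * needs skb->dev to be set; it either drops the skb (the device has no
	 * npinfo yet) or queues it on npinfo->txq and kicks npinfo->tx_work. */
	static void example_defer_tx(struct net_device *dev, struct sk_buff *skb)
	{
		skb->dev = dev;
		netpoll_queue(skb);
	}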
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
struct sk_buff *skb;
unsigned long flags;
- spin_lock_irqsave(&skb_list_lock, flags);
- while (nr_skbs < MAX_SKBS) {
+ spin_lock_irqsave(&skb_pool.lock, flags);
+ while (skb_pool.qlen < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
- skb->next = skbs;
- skbs = skb;
- nr_skbs++;
+ __skb_queue_tail(&skb_pool, skb);
}
- spin_unlock_irqrestore(&skb_list_lock, flags);
+ spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
put_cpu_var(softnet_data);
}
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
- int once = 1, count = 0;
- unsigned long flags;
- struct sk_buff *skb = NULL;
+ int count = 0;
+ struct sk_buff *skb;
zap_completion_queue();
+ refill_skbs();
repeat:
- if (nr_skbs < MAX_SKBS)
- refill_skbs();
skb = alloc_skb(len, GFP_ATOMIC);
-
- if (!skb) {
- spin_lock_irqsave(&skb_list_lock, flags);
- skb = skbs;
- if (skb) {
- skbs = skb->next;
- skb->next = NULL;
- nr_skbs--;
- }
- spin_unlock_irqrestore(&skb_list_lock, flags);
- }
+ if (!skb)
+ skb = skb_dequeue(&skb_pool);
if(!skb) {
- count++;
- if (once && (count == 1000000)) {
- printk("out of netpoll skbs!\n");
- once = 0;
+ if (++count < 10) {
+ netpoll_poll(np);
+ goto repeat;
}
- netpoll_poll(np);
- goto repeat;
+ return NULL;
}
atomic_set(&skb->users, 1);
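One behavioral change worth flagging: the old find_skb() retried forever (printing a warning after a million attempts), while the new version retries fewer than ten times via netpoll_poll() and then returns NULL, so every caller must tolerate a NULL return and drop the message. A sketch of the expected caller pattern (len and reserve stand in for whatever the caller computes):

	struct sk_buff *skb;

	skb = find_skb(np, len, reserve);
	if (!skb)
		return;		/* allocation failed and the pool is empty: drop the message */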
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
udph->check = 0;
+ udph->check = csum_tcpudp_magic(htonl(np->local_ip),
+ htonl(np->remote_ip),
+ udp_len, IPPROTO_UDP,
+ csum_partial((unsigned char *)udph, udp_len, 0));
+ if (udph->check == 0)
+ udph->check = -1;
skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
npinfo->tries = MAX_RETRIES;
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
- } else
+ skb_queue_head_init(&npinfo->txq);
+ INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
+ atomic_set(&npinfo->refcnt, 1);
+ } else {
npinfo = ndev->npinfo;
+ atomic_inc(&npinfo->refcnt);
+ }
if (!ndev->poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
return -1;
}
+static int __init netpoll_init(void)
+{
+ skb_queue_head_init(&skb_pool);
+ return 0;
+}
+core_initcall(netpoll_init);
+
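Since the pool is now a struct sk_buff_head rather than a bare pointer, it must be initialized before the first refill_skbs() call: a zeroed sk_buff_head has NULL list pointers, and __skb_queue_tail() would dereference them. core_initcall() runs netpoll_init() early in boot, well before netconsole or any other netpoll client can reach netpoll_setup(). For reference, the initialization it performs is equivalent to:

	/* Equivalent of skb_queue_head_init(&skb_pool), shown for illustration:
	 * an empty queue points at itself and carries its own spinlock. */
	static void skb_pool_init_sketch(struct sk_buff_head *list)
	{
		spin_lock_init(&list->lock);
		list->prev = list->next = (struct sk_buff *)list;
		list->qlen = 0;
	}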
void netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
if (np->dev) {
npinfo = np->dev->npinfo;
- if (npinfo && npinfo->rx_np == np) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- npinfo->rx_np = NULL;
- npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ if (npinfo) {
+ if (npinfo->rx_np == np) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ npinfo->rx_np = NULL;
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
+
+ np->dev->npinfo = NULL;
+ if (atomic_dec_and_test(&npinfo->refcnt)) {
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+ flush_scheduled_work();
+
+ kfree(npinfo);
+ }
}
+
dev_put(np->dev);
}
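The refcount makes npinfo shared, per-device state: netpoll_setup() on a device that already has npinfo only takes another reference (the else branch above), and the queues are purged and the structure freed only when the count drops to zero in netpoll_cleanup(). The common single-client lifecycle, with np purely illustrative (e.g. a netconsole target; error handling omitted):

	netpoll_setup(&np);	/* allocates ndev->npinfo, refcnt = 1 */
	/* ... use np for tx/rx ... */
	netpoll_cleanup(&np);	/* clears ndev->npinfo, refcnt 1 -> 0: purge arp_tx
				 * and txq, flush scheduled work, kfree(npinfo) */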