/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

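/*
 * Work-queue handler that drains the deferred transmit queue.  Packets
 * that still cannot be sent (device gone, queue frozen or stopped, or
 * the driver reports busy) are requeued and the work is rescheduled.
 */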
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

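/*
 * Validate the UDP checksum of an incoming packet, trusting checksum
 * offload results (CHECKSUM_UNNECESSARY / CHECKSUM_COMPLETE) where the
 * hardware already did the work.  Returns 0 when the checksum is good.
 */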
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

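/*
 * Pump the device for pending work while interrupts may be disabled:
 * kick the driver's ->ndo_poll_controller, run any NAPI contexts we can
 * grab, forward or answer queued ARP requests, and reap completed tx skbs.
 */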
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->priv_flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

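/*
 * Get an skb for transmission: try a fresh atomic allocation, fall back
 * to the pre-allocated pool, and if both fail poll the device to reclaim
 * completed buffers before retrying a bounded number of times.
 */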
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

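/*
 * Transmit immediately when it is safe to do so: nothing already queued
 * ahead of us and no recursion into a NAPI poll we own.  Otherwise the
 * skb is deferred to queue_process() via the tx workqueue.
 */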
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

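/*
 * A minimal usage sketch, for illustration only (the client name and
 * message buffer are hypothetical; netconsole follows the same pattern):
 *
 *	static struct netpoll np = {
 *		.name		= "example",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_mac	= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	np.remote_ip = in_aton("10.0.0.2");
 *	if (!netpoll_setup(&np))
 *		netpoll_send_udp(&np, buf, buf_len);
 */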
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

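/*
 * Inspect a received frame on netpoll's behalf.  Returns 1 if the skb
 * was consumed (an ARP request queued for arp_reply(), a UDP packet
 * delivered to a matching rx_hook, or any packet while trapping is
 * active) and 0 to hand it back to the normal network stack.
 */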
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

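/*
 * Parse a netconsole-style configuration string of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-macaddr]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55".
 * Omitted fields keep whatever defaults the caller pre-loaded in @np.
 */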
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
			       "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

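/*
 * Attach netpoll state to a device that has already been looked up and
 * opened.  Callers are expected to hold RTNL, as netpoll_setup() below
 * does around this call.
 */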
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	RCU_INIT_POINTER(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
		       np->name, np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

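/*
 * While the "trapped" count is non-zero, __netpoll_rx() consumes every
 * packet it inspects instead of returning it to the stack; a client
 * such as a kernel debugger holds the trap across its polling session.
 */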
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);