~ubuntu-branches/ubuntu/utopic/xen/utopic

Viewing changes to xen/drivers/net/tulip/interrupt.c

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

/*
        drivers/net/tulip/interrupt.c

        Maintained by Jeff Garzik <jgarzik@pobox.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
        for more information on this driver, or visit the project
        Web page at http://sourceforge.net/projects/tulip/

*/

#include "tulip.h"
#include <linux/config.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>


int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_NET_HW_FLOWCONTROL

#define MIT_SIZE 15
unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; we use other techniques for
            TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16       Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif

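/* Replenish empty slots in the Rx ring with freshly allocated skbuffs and
 * hand the descriptors back to the chip; returns the number of buffers
 * refilled.  On the LC82C168 the receiver is also kicked if it stopped
 * for lack of buffers.
 */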
int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = (struct tulip_private *)dev->priv;
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if(tp->chip_id == LC82C168) {
                if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        outl(0x01, dev->base_addr + CSR2);
                }
        }
        return refilled;
}


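/* Drain received frames from the Rx ring and pass them up the stack,
 * bounded by the per-call work limit.  With COPYBREAK, short frames are
 * copied into a minimally-sized skbuff so the ring buffer can be reused;
 * under CONFIG_NET_HW_FLOWCONTROL the netif_rx() feedback also drives
 * the interrupt mitigation setting.
 */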
static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = (struct tulip_private *)dev->priv;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
        int drop = 0, mit_sel = 0;

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                           tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                   dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                           "spanned multiple buffers, status %8.8x!\n",
                                                           dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                   dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (status & 0x0890) tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = ((status >> 16) & 0x7ff) - 4;
                        struct sk_buff *skb;

#ifndef final_version
                        if (pkt_len > 1518) {
                                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                           dev->name, pkt_len, pkt_len);
                                pkt_len = 1518;
                                tp->stats.rx_length_errors++;
                        }
#endif

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        drop = atomic_read(&netdev_dropping);
                        if (drop)
                                goto throttle;
#endif
#ifdef COPYBREAK
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single(tp->pdev,
                                                    tp->rx_buffers[entry].mapping,
                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->tail,
                                       pkt_len);
#endif
                        } else {        /* Pass up the skb already on the Rx ring. */
#else
                        {
#endif /* COPYBREAK */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        mit_sel =
#endif
                        netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        switch (mit_sel) {
                        case NET_RX_SUCCESS:
                        case NET_RX_CN_LOW:
                        case NET_RX_CN_MOD:
                                break;

                        case NET_RX_CN_HIGH:
                                rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
                                break;
                        case NET_RX_DROP:
                                rx_work_limit = -1;
                                break;
                        default:
                                printk("unknown feedback return code %d\n", mit_sel);
                                break;
                        }

                        drop = atomic_read(&netdev_dropping);
                        if (drop) {
throttle:
                                rx_work_limit = -1;
                                mit_sel = NET_RX_DROP;

                                if (tp->fc_bit) {
                                        long ioaddr = dev->base_addr;

                                        /* disable Rx & RxNoBuf ints. */
                                        outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
                                        set_bit(tp->fc_bit, &netdev_fc_xoff);
                                }
                        }
#endif
                        dev->last_rx = jiffies;
                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
#ifdef CONFIG_NET_HW_FLOWCONTROL

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We can have IM enabled
           continuously but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           This would turn on IM for devices that are not contributing
           to backlog congestion with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note. We only use min and max (0, 15) settings from mit_table */


        if( tp->flags &  HAS_INTR_MITIGATION) {
                if((received > 1 || mit_sel == NET_RX_DROP)
                   && tp->mit_sel != 15 ) {
                        tp->mit_sel = 15;
                        tp->mit_change = 1; /* Force IM change */
                }
                if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
                        tp->mit_sel = 0;
                        tp->mit_change = 1; /* Force IM change */
                }
        }

        return RX_RING_SIZE+1; /* maxrx+1 */
#else
        return received;
#endif
}

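/* On platforms where the PHY interrupt is routed through CSR12 (currently
 * only __hppa__ builds), acknowledge the link-change interrupt and recheck
 * the duplex setting; elsewhere this is a no-op.
 */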
static inline void phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
        int csr12 = inl(dev->base_addr + CSR12) & 0xff;
        struct tulip_private *tp = (struct tulip_private *)dev->priv;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                outl(csr12 | 0x02, dev->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                outl(csr12 & ~0x02, dev->base_addr + CSR12);
        }
#endif
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = (struct tulip_private *)dev->priv;
        long ioaddr = dev->base_addr;
        int csr5;
        int entry;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
        unsigned int work_count = tulip_max_interrupt_work;

        /* Let's see whether the interrupt really is for us */
        csr5 = inl(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                phy_interrupt (dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return;

        tp->nir++;

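        /* Service the reported interrupt causes, re-reading CSR5 each pass,
         * until no normal/abnormal event remains pending or the
         * tulip_max_interrupt_work budget is used up.
         */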
        do {
                /* Acknowledge all of the current interrupt sources ASAP. */
                outl(csr5 & 0x0001ffff, ioaddr + CSR5);

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                                   dev->name, csr5, inl(dev->base_addr + CSR5));

                if (csr5 & (RxIntr | RxNoBuf)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        if ((!tp->fc_bit) ||
                            (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
#endif
                                rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

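                /* Reclaim Tx descriptors the chip has finished with, update
                 * the error counters, and wake the queue when ring space
                 * frees up.
                 */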
                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                                 dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                         tp->tx_buffers[entry].mapping,
                                                         sizeof(tp->setup_frame),
                                                         PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                           dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                           dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                                   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                                   dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                outl(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        outl(tp->mc_filter[0], ioaddr + 0xAC);
                                        outl(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
#ifdef CONFIG_NET_HW_FLOWCONTROL
                                if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
                                        tp->stats.rx_errors++;
                                        tulip_start_rxtx(tp);
                                }
#else
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
#endif
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SytemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                        dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        outl(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                           dev->name, csr5);
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
                          if (net_ratelimit()) printk("BUG!! enabling interrupt when FC off (timerintr.) \n");
#endif
                        outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                           "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        outl(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
                                if(tp->mit_change) {
                                        outl(mit_table[tp->mit_sel], ioaddr + CSR11);
                                        tp->mit_change = 0;
                                }
#else
                     /* Josip Loncaric at ICASE did extensive experimentation
                        to develop a good interrupt mitigation setting.*/
                                outl(0x8b240000, ioaddr + CSR11);
#endif
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer.*/
                                outl(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                          /* Mask all interrupting sources, set timer to
                                re-enable. */
#ifndef CONFIG_NET_HW_FLOWCONTROL
                                outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                                outl(0x0012, ioaddr + CSR11);
#endif
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = inl(ioaddr + CSR5);
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        outl(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
                                outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                        ioaddr + CSR7);
                                outl(TimerInt, ioaddr + CSR5);
                                outl(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }

        if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                           dev->name, inl(ioaddr + CSR5));

}