2
* Primitive le diver made in 1999 from Linux sunlance.c.
3
* sunlance.c: Written 1995, 1996 by Miguel de Icaza <miguel@nuclecu.unam.mx>.
6
// #include <linux/errno.h>
9
/* #include <asm/byteorder.h> */ /* Used by the checksum routines */
10
// #include <psr.h> /* PSR_PIL */
11
#include <system.h> /* == <asm/system.h> */
12
#include <general.h> /* __P for netpriv.h */
13
#include <net.h> /* ETH_ALEN */
14
/* #include <asm/bitops.h> */
15
/* #include <asm/io.h> */
16
#include <dma.h> /* dmaga */
20
#include "phys_jj.h" /* hardcoded address */
26
struct sparc_dma_registers *regs;
28
enum dvma_rev revision;
31
/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
32
#ifndef LANCE_LOG_TX_BUFFERS
33
#define LANCE_LOG_TX_BUFFERS 4
34
#define LANCE_LOG_RX_BUFFERS 4
37
/* Ethernet FCS generator polynomial, in both bit orders. */
#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
45
#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */

/* Controller status/control bits, CSR0 (see "Point back to csr0" in load_csrs) */
#define LE_C0_ERR  0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
#define LE_C0_MERR 0x0800 /* ME: Memory error */
#define LE_C0_RINT 0x0400 /* Received interrupt */
#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
#define LE_C0_INTR 0x0080 /* Interrupt or error */
#define LE_C0_INEA 0x0040 /* Interrupt enable */
#define LE_C0_RXON 0x0020 /* Receiver on */
#define LE_C0_TXON 0x0010 /* Transmitter on */
#define LE_C0_TDMD 0x0008 /* Transmitter demand */
#define LE_C0_STOP 0x0004 /* Stop the card */
#define LE_C0_STRT 0x0002 /* Start the card */
#define LE_C0_INIT 0x0001 /* Init the card */

/* Bus-master configuration bits (written as busmaster_regval) */
#define LE_C3_BSWP 0x4 /* SWAP */
#define LE_C3_ACON 0x2 /* ALE Control */
#define LE_C3_BCON 0x1 /* Byte control */
68
/* Receive message descriptor 1 */
#define LE_R1_OWN 0x80 /* Who owns the entry */
#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
#define LE_R1_FRA 0x20 /* FRA: Frame error */
#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
#define LE_R1_CRC 0x08 /* CRC error */
#define LE_R1_BUF 0x04 /* BUF: Buffer error */
#define LE_R1_SOP 0x02 /* Start of packet */
#define LE_R1_EOP 0x01 /* End of packet */
#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
79
/* Transmit message descriptor 1 */
#define LE_T1_OWN 0x80 /* Lance owns the packet */
#define LE_T1_ERR 0x40 /* Error summary */
#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
#define LE_T1_EONE 0x08 /* Error: one retry needed */
#define LE_T1_EDEF 0x04 /* Error: deferred */
#define LE_T1_SOP 0x02 /* Start of packet */
#define LE_T1_EOP 0x01 /* End of packet */
#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
88
/* Transmit error/status bits (read as `status` in lance_tx) */
#define LE_T3_BUF 0x8000 /* Buffer error */
#define LE_T3_UFL 0x4000 /* Error underflow */
#define LE_T3_LCOL 0x1000 /* Error late collision */
#define LE_T3_CLOS 0x0800 /* Error carrier loss */
#define LE_T3_RTY 0x0400 /* Error retry */
#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
95
/* Ring geometry derived from the LANCE_LOG_* constants above;
 * the LEN_BITS values occupy the top 3 bits of the init-block len words. */
#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)

/* One packet buffer: max Ethernet frame plus slack, shared by Rx and Tx. */
#define PKT_BUF_SZ 1544
#define RX_BUFF_SIZE PKT_BUF_SZ
#define TX_BUFF_SIZE PKT_BUF_SZ
107
struct lance_rx_desc {
108
unsigned short rmd0; /* low address of packet */
109
unsigned char rmd1_bits; /* descriptor bits */
110
unsigned char rmd1_hadr; /* high address of packet */
111
short length; /* This length is 2s complement (negative)!
114
unsigned short mblength; /* This is the actual number of bytes received */
117
struct lance_tx_desc {
118
unsigned short tmd0; /* low address of packet */
119
unsigned char tmd1_bits; /* descriptor bits */
120
unsigned char tmd1_hadr; /* high address of packet */
121
short length; /* Length is 2s complement (negative)! */
125
/* The LANCE initialization block, described in databook. */
126
/* On the Sparc, this block should be on a DMA region */
127
struct lance_init_block {
128
unsigned short mode; /* Pre-set mode (reg. 15) */
129
unsigned char phys_addr[6]; /* Physical ethernet address */
130
unsigned filter[2]; /* Multicast filter. */
132
/* Receive and transmit ring base, along with extra bits. */
133
unsigned short rx_ptr; /* receive descriptor addr */
134
unsigned short rx_len; /* receive len and high addr */
135
unsigned short tx_ptr; /* transmit descriptor addr */
136
unsigned short tx_len; /* transmit len and high addr */
138
/* The Tx and Rx ring entries must aligned on 8-byte boundaries. */
139
struct lance_rx_desc brx_ring[RX_RING_SIZE];
140
struct lance_tx_desc btx_ring[TX_RING_SIZE];
142
char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
143
char pad[2]; /* align rx_buf for copy_and_sum(). */
144
char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
147
/* Byte offset of ring element `elem` of member array `rt` within
 * struct lance_init_block — the classic null-pointer offsetof idiom. */
#define libdesc_offset(rt, elem) \
((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))

/* Same, for a two-dimensional buffer array: offset of buffer `elem`. */
#define libbuff_offset(rt, elem) \
((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
154
int active; /* initialized */
155
int inst; /* iface number */
157
volatile struct lance_regs *ll;
158
volatile struct lance_init_block *init_block;
159
__u32 init_block_dvma;
160
unsigned int irq; /* device IRQ number */
165
struct le_dma *ledma; /* If set this points to ledma */
166
/* and arch = sun4m */
167
int tpe; /* cable-selection is TPE */
168
int auto_select; /* cable-selection by carrier */
169
int burst_sizes; /* ledma SBus burst sizes */
171
unsigned short busmaster_regval;
172
unsigned short pio_buffer;
174
struct device edev; /* Inherit Ethernet */
178
/* Free Tx descriptors between tx_new (producer) and tx_old (consumer);
 * the formula always keeps one ring slot unused. */
#define TX_BUFFS_AVAIL(lp) ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)
182
/* On the sparc, the lance control ports are memory mapped */
184
unsigned short rdp; /* register data port */
185
unsigned short rap; /* register address port */
/* Driver debug verbosity; default level 2. */
int sparc_lance_debug = 2;
190
/* The Lance uses 24 bit addresses */
191
/* On the Sun4c the DVMA will provide the remaining bytes for us */
192
/* On the Sun4m we have to instruct the ledma to provide them */
193
/* Even worse, on scsi/ether SBUS cards, the init block and the
194
* transmit/receive buffers are addresses as offsets from absolute
195
* zero on the lebuffer PIO area. -davem
/* The Lance uses 24-bit addresses: strip the top byte of a CPU address. */
#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
201
* XXX Turn this into PROM node when needed.
208
int tpe; /* 0 = auto; 1 = AUI; 2 = TPE; */
213
struct le_dma *ledma;
218
static struct le_private le_arena[1];
219
static struct le_info le_node0;
220
static struct le_dma ledma0;
222
/* Load the CSR registers */
223
static void load_csrs (struct le_private *lp)
225
volatile struct lance_regs *ll = lp->ll;
226
__u32 ib_dvma = lp->init_block_dvma;
229
/* This is right now because when we are using a PIO buffered
230
* init block, init_block_dvma is set to zero. -DaveM
232
leptr = LANCE_ADDR (ib_dvma);
235
ll->rdp = (leptr & 0xFFFF);
237
ll->rdp = leptr >> 16;
239
ll->rdp = lp->busmaster_regval;
241
/* Point back to csr0 */
247
/* Setup the Lance Rx and Tx rings */
248
/* Sets dev->tbusy */
249
static void lance_init_ring (struct device *dev)
251
struct le_private *lp = (struct le_private *) dev->priv;
252
volatile struct lance_init_block *ib = lp->init_block;
253
__u32 ib_dvma = lp->init_block_dvma;
254
__u32 aib; /* for LANCE_ADDR computations */
258
/* This is right now because when we are using a PIO buffered
259
* init block, init_block_dvma is set to zero. -DaveM
263
/* Lock out other processes while setting up hardware */
265
lp->rx_new = lp->tx_new = 0;
266
lp->rx_old = lp->tx_old = 0;
270
/* Copy the ethernet address to the lance init block
271
* Note that on the sparc you need to swap the ethernet address.
272
* Note also we want the CPU ptr of the init_block here.
274
ib->phys_addr [0] = dev->dev_addr [1];
275
ib->phys_addr [1] = dev->dev_addr [0];
276
ib->phys_addr [2] = dev->dev_addr [3];
277
ib->phys_addr [3] = dev->dev_addr [2];
278
ib->phys_addr [4] = dev->dev_addr [5];
279
ib->phys_addr [5] = dev->dev_addr [4];
282
printk ("TX rings:\n");
284
/* Setup the Tx ring entries */
285
for (i = 0; i <= TX_RING_SIZE; i++) {
286
leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
287
ib->btx_ring [i].tmd0 = leptr;
288
ib->btx_ring [i].tmd1_hadr = leptr >> 16;
289
ib->btx_ring [i].tmd1_bits = 0;
290
ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
291
ib->btx_ring [i].misc = 0;
293
if (ZERO) printk ("%d: 0x%8.8x\n", i, leptr);
296
/* Setup the Rx ring entries */
298
printk ("RX rings:\n");
299
for (i = 0; i < RX_RING_SIZE; i++) {
300
leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
302
ib->brx_ring [i].rmd0 = leptr;
303
ib->brx_ring [i].rmd1_hadr = leptr >> 16;
304
ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
305
ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
306
ib->brx_ring [i].mblength = 0;
308
printk ("%d: 0x%8.8x\n", i, leptr);
311
/* Setup the initialization block */
313
/* Setup rx descriptor pointer */
314
leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
315
ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
318
printk ("RX ptr: %8.8x\n", leptr);
320
/* Setup tx descriptor pointer */
321
leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
322
ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
325
printk ("TX ptr: %8.8x\n", leptr);
327
/* Clear the multicast filter */
332
static int init_restart_lance (struct le_private *lp)
334
volatile struct lance_regs *ll = lp->ll;
338
struct sparc_dma_registers *dregs = lp->ledma->regs;
341
if (!(dregs->cond_reg & DMA_HNDL_ERROR)) {
342
/* E-Cache draining */
343
while (dregs->cond_reg & DMA_FIFO_ISDRAIN)
347
creg = dregs->cond_reg;
348
if (lp->burst_sizes & DMA_BURST32)
349
creg |= DMA_E_BURST8;
351
creg &= ~DMA_E_BURST8;
353
creg |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
356
creg |= DMA_EN_ENETAUI;
358
creg &= ~DMA_EN_ENETAUI;
360
dregs->cond_reg = creg;
365
ll->rdp = LE_C0_INIT;
367
/* Wait for the lance to complete initialization */
368
for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
370
if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
371
printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
373
printk ("dcsr=%8.8x\n",
374
(unsigned int) lp->ledma->regs->cond_reg);
378
/* Clear IDON by writing a "1", enable interrupts and start lance */
379
ll->rdp = LE_C0_IDON;
380
ll->rdp = LE_C0_INEA | LE_C0_STRT;
383
lp->ledma->regs->cond_reg |= DMA_INT_ENAB;
388
static int lance_rx (struct device *dev)
390
struct le_private *lp = (struct le_private *) dev->priv;
391
volatile struct lance_init_block *ib = lp->init_block;
392
volatile struct lance_rx_desc *rd;
399
for (i = 0; i < RX_RING_SIZE; i++) {
402
ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
405
ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
410
for (rd = &ib->brx_ring [lp->rx_new];
411
!((bits = rd->rmd1_bits) & LE_R1_OWN);
412
rd = &ib->brx_ring [lp->rx_new]) {
414
/* We got an incomplete frame? */
415
if ((bits & LE_R1_POK) != LE_R1_POK) {
417
} else if (bits & LE_R1_ERR) {
418
/* Count only the end frame as a rx error,
422
if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
423
if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
424
if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
425
if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
426
if (bits & LE_R1_EOP) lp->stats.rx_errors++;
429
len = (rd->mblength & 0xfff) - 4;
430
skb = dev_alloc_skb (len+2);
433
printk ("%s: Memory squeeze, deferring packet.\n",
436
rd->rmd1_bits = LE_R1_OWN;
437
lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
442
skb_reserve (skb, 2); /* 16 byte align */
443
skb_put (skb, len); /* make room */
444
eth_copy_and_sum(skb,
445
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
447
skb->protocol = eth_type_trans (skb, dev);
451
/* Return the packet to the pool */
453
rd->rmd1_bits = LE_R1_OWN;
454
lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
459
static int lance_tx (struct device *dev)
461
struct le_private *lp = (struct le_private *) dev->priv;
462
volatile struct lance_init_block *ib = lp->init_block;
463
volatile struct lance_regs *ll = lp->ll;
464
volatile struct lance_tx_desc *td;
469
for (i = j; i != lp->tx_new; i = j) {
470
td = &ib->btx_ring [i];
472
/* If we hit a packet not owned by us, stop */
473
if (td->tmd1_bits & LE_T1_OWN)
476
if (td->tmd1_bits & LE_T1_ERR) {
479
if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
480
if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
482
if (status & LE_T3_CLOS) {
483
if (lp->auto_select) {
484
lp->tpe = 1 - lp->tpe;
485
printk("%s: Carrier Lost, trying %s\n",
486
dev->name, lp->tpe?"TPE":"AUI");
489
ll->rdp = LE_C0_STOP;
490
lance_init_ring (dev);
492
init_restart_lance (lp);
497
/* Buffer errors and underflows turn off the
498
* transmitter, restart the adapter.
500
if (status & (LE_T3_BUF|LE_T3_UFL)) {
501
printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
505
ll->rdp = LE_C0_STOP;
506
lance_init_ring (dev);
508
init_restart_lance (lp);
511
} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
513
* So we don't count the packet more than once.
515
td->tmd1_bits &= ~(LE_T1_POK);
518
/* One collision before packet was sent. */
519
if (td->tmd1_bits & LE_T1_EONE)
520
lp->stats.collisions++;
522
/* More than one collision, be optimistic. */
523
if (td->tmd1_bits & LE_T1_EMORE)
524
lp->stats.collisions += 2;
526
lp->stats.tx_packets++;
530
j = (j + 1) & TX_RING_MOD_MASK;
536
static void lance_interrupt(void *dev_id)
538
struct device *dev = (struct device *)dev_id;
539
struct le_private *lp = (struct le_private *)dev->priv;
540
volatile struct lance_regs *ll = lp->ll;
544
printk ("%s: again", dev->name);
551
/* Acknowledge all the interrupt sources ASAP */
552
ll->rdp = csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT);
554
if ((csr0 & LE_C0_ERR)) {
555
/* Clear the error condition */
556
ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
557
LE_C0_CERR | LE_C0_MERR;
560
if (csr0 & LE_C0_RINT)
563
if (csr0 & LE_C0_TINT)
566
if ((TX_BUFFS_AVAIL(lp) >= 0) && dev->tbusy) {
572
if (csr0 & LE_C0_BABL)
573
lp->stats.tx_errors++;
575
if (csr0 & LE_C0_MISS)
576
lp->stats.rx_errors++;
579
if (csr0 & LE_C0_MERR) {
580
struct sparc_dma_registers *dregs = lp->ledma->regs;
581
unsigned long tst = (unsigned long)dregs->st_addr;
583
printk ("%s: Memory error, status %04x, addr %06lx\n",
584
dev->name, csr0, tst & 0xffffff);
586
ll->rdp = LE_C0_STOP;
589
lp->ledma->regs->cond_reg |= DMA_FIFO_INV;
591
lance_init_ring (dev);
593
init_restart_lance (lp);
597
ll->rdp = LE_C0_INEA;
601
static int lance_open (struct device *dev)
603
struct le_private *lp = (struct le_private *)dev->priv;
604
volatile struct lance_regs *ll = lp->ll;
607
if (request_irq(lp->irq, &lance_interrupt, (void *)dev)) {
608
printk ("Lance: Can't get irq %d\n", lp->irq);
614
ll->rdp = LE_C0_STOP;
616
/* On the 4m, setup the ledma to provide the upper bits for buffers */
618
lp->ledma->regs->dma_test = ((__u32) lp->init_block_dvma) & 0xff000000;
620
lance_init_ring (dev);
627
status = init_restart_lance (lp);
629
/* To emulate SunOS, we add a route to the local network */
631
dev->pa_addr & ip_get_mask (dev->pa_addr),
632
ip_get_mask (dev->pa_addr),
633
0, dev, dev->mtu, 0, 0);
636
if (!status && lp->auto_select) {
638
* Build a fake network packet and send it to ourselfs.
640
volatile struct lance_init_block *ib = lp->init_block;
641
volatile unsigned long flush;
642
unsigned char packet[ETH_ZLEN];
643
struct ethhdr *eth = (struct ethhdr *)packet;
646
memset(packet, 0, ETH_ZLEN);
647
for (i = 0; i < 6; i++) {
648
eth->h_dest[i] = dev->dev_addr[i];
649
eth->h_source[i] = dev->dev_addr[i];
652
entry = lp->tx_new & TX_RING_MOD_MASK;
653
ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
654
ib->btx_ring[entry].misc = 0;
656
bcopy(packet, (char *)&ib->tx_buf[entry][0], ETH_ZLEN);
657
ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
658
lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
660
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
668
static int lance_close (struct device *dev)
670
struct le_private *lp = (struct le_private *) dev->priv;
671
volatile struct lance_regs *ll = lp->ll;
678
ll->rdp = LE_C0_STOP;
680
free_irq (lp->irq, (void *) dev);
684
static inline int lance_reset (struct device *dev)
686
struct le_private *lp = (struct le_private *)dev->priv;
687
volatile struct lance_regs *ll = lp->ll;
692
ll->rdp = LE_C0_STOP;
694
/* On the 4m, reset the dma too */
696
printk ("resetting ledma\n");
697
lp->ledma->regs->cond_reg |= DMA_RST_ENET;
699
lp->ledma->regs->cond_reg &= ~DMA_RST_ENET;
700
lp->ledma->regs->dma_test = ((__u32) lp->init_block_dvma) & 0xff000000;
702
lance_init_ring (dev);
704
dev->trans_start = jiffies;
708
status = init_restart_lance (lp);
710
printk ("Lance restart=%d\n", status);
715
static int lance_start_xmit (struct sk_buff *skb, struct device *dev)
717
struct le_private *lp = (struct le_private *)dev->priv;
718
volatile struct lance_regs *ll = lp->ll;
719
volatile struct lance_init_block *ib = lp->init_block;
720
volatile unsigned long flush;
722
int entry, skblen, len;
726
/* Transmitter timeout, serious problems */
728
int tickssofar = jiffies - dev->trans_start;
730
if (tickssofar < 100) {
733
printk ("%s: transmit timed out, status %04x, reset\n",
741
/* Block a timer-based transmit from overlapping. */
742
if (test_and_set_bit (0, (void *) &dev->tbusy) != 0) {
743
printk ("Transmitter access conflict.\n");
747
if (dev->tbusy != 0) {
748
printk ("Transmitter access conflict.\n");
758
if (!TX_BUFFS_AVAIL(lp)) {
759
restore_flags(flags);
763
len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
765
entry = lp->tx_new & TX_RING_MOD_MASK;
766
ib->btx_ring [entry].length = (-len) | 0xf000;
767
ib->btx_ring [entry].misc = 0;
769
bcopy(skb->data, (char *)&ib->tx_buf [entry][0], skblen);
771
/* Clear the slack of the packet, do I need this? */
772
/* For a firewall its a good idea - AC */
774
bzero((char *) &ib->tx_buf [entry][skblen], len - skblen);
776
/* Now, give the packet to the lance */
777
ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
778
lp->tx_new = (lp->tx_new+1) & TX_RING_MOD_MASK;
781
/* Kick the lance: transmit now */
782
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
783
dev->trans_start = jiffies;
786
if (TX_BUFFS_AVAIL(lp))
789
/* Read back CSR to invalidate the E-Cache.
790
* This is needed, because DMA_DSBL_WR_INV is set. */
794
restore_flags(flags);
799
le_init(struct le_private *le, struct le_info *info, int instance)
801
struct device *dev = &le->edev;
802
volatile struct lance_regs *ll;
808
le->lename[2] = instance + '0';
809
le->lename[3] = '\0';
811
printk ("%s: LANCE ", le->lename);
812
/* Copy the IDPROM ethernet address to the device structure, later we
813
* will copy the address in the device structure to the lance
814
* initialization block.
816
for (i = 0; i < 6; i++)
817
printk ("%x%c", dev->dev_addr[i] = idprom[i + 2],
821
/* Get the IO region */
822
ll = map_io(info->physaddr[1], sizeof (struct lance_regs));
823
if (ll == 0) return -1;
826
/* Make certain the data structures used by the LANCE are aligned. */
827
dev->priv = (void *)(((unsigned long)dev->priv + 7) & ~7);
831
lp->sbus = sdev->my_bus;
833
prom_apply_sbus_ranges (lebuffer->my_bus,
834
&lebuffer->reg_addrs [0],
835
lebuffer->num_registers,
838
le->init_block = (void *)
839
sparc_alloc_io (lebuffer->reg_addrs [0].phys_addr, 0,
840
sizeof (struct lance_init_block), "lebuffer",
841
lebuffer->reg_addrs [0].which_io, 0);
842
le->init_block_dvma = 0;
847
le->init_block = dvma_alloc(sizeof (struct lance_init_block),
848
&le->init_block_dvma);
853
le->busmaster_regval = (LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON);
855
le->ledma = info->ledma;
856
le->irq = info->intr;
860
/* Find burst-size property for ledma */
861
le->burst_sizes = info->ledma->bursizes;
863
/* Get the cable-selection property */
864
if (info->tpe == 0) {
865
printk("le%d: using auto-carrier-detection.\n",
869
} else if (info->tpe == 1) {
875
printk("le%d: using TPE.\n", le->inst);
879
le->ledma->regs->cond_reg |= DMA_RST_ENET;
881
le->ledma->regs->cond_reg &= ~DMA_RST_ENET;
884
/* This should never happen. */
885
if ((unsigned long)(le->init_block->brx_ring) & 0x07) {
886
printk("%s: ERROR: Rx and Tx rings not on even boundary.\n",
891
dev->name = le->lename;
893
dev->open = &lance_open;
894
dev->stop = &lance_close;
895
dev->hard_start_xmit = &lance_start_xmit;
901
static int ledma_init(struct le_dma *ledma)
905
/* Hardcode everything for MrCoffee. */
906
if ((p = map_io(PHYS_JJ_LEDMA, 0x10)) == 0) {
907
printk("ledma_init: cannot map registers\n");
911
ledma->bursizes = 0x3F;
915
switch((ledma->regs->cond_reg)&DMA_DEVICE_ID) {
917
ledma->revision=dvmarev0;
918
printk("Revision 0 ");
921
ledma->revision=dvmaesc1;
922
printk("ESC Revision 1 ");
925
ledma->revision=dvmarev1;
926
printk("Revision 1 ");
929
ledma->revision=dvmarev2;
930
printk("Revision 2 ");
933
ledma->revision=dvmahme;
934
printk("HME DVMA gate array ");
937
ledma->revision=dvmarevplus;
938
printk("Revision 1 PLUS ");
941
printk("unknown dma version %x",
942
(ledma->regs->cond_reg)&DMA_DEVICE_ID);
943
/* ledma->allocated = 1; */
951
* Find all the lance cards on the system and initialize them
956
/* Hardcode everything for MrCoffee. */
957
le_node0.physaddr[0] = 0;
958
le_node0.physaddr[1] = PHYS_JJ_LE;
959
le_node0.physaddr[3] = 4;
961
le_node0.intr = 6; /* 0x26 - OBIO? */
963
if (le_arena[0].active) {
964
printk("le_probe: no free le\n");
968
if (ledma_init(&ledma0) != 0) {
971
le_node0.ledma = &ledma0;
973
if (le_init(&le_arena[0], &le_node0, 0) != 0) {
974
printk("le_probe: le0 init failed\n");
977
le_arena[0].active = 1;