/*
 * Copyright (C) 2012 Adrian Jamroz <adrian.jamroz@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
20
FILE_LICENCE ( GPL2_OR_LATER );
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/mii.h>
#include "rhine.h"
/** @file
 *
 * VIA Rhine network driver
 *
 */
42
/******************************************************************************
 *
 * MII interface
 *
 ******************************************************************************
 */
* Read from MII register
52
* @v mii MII interface
53
* @v reg Register address
54
* @ret value Data read, or negative error
56
static int rhine_mii_read ( struct mii_interface *mii, unsigned int reg ) {
57
struct rhine_nic *rhn = container_of ( mii, struct rhine_nic, mii );
58
unsigned int timeout = RHINE_TIMEOUT_US;
61
DBGC2 ( rhn, "RHINE %p MII read reg %d\n", rhn, reg );
64
writeb ( reg, rhn->regs + RHINE_MII_ADDR );
65
cr = readb ( rhn->regs + RHINE_MII_CR );
66
writeb ( ( cr | RHINE_MII_CR_RDEN ), rhn->regs + RHINE_MII_CR );
68
/* Wait for read to complete */
71
cr = readb ( rhn->regs + RHINE_MII_CR );
72
if ( ! ( cr & RHINE_MII_CR_RDEN ) )
73
return readw ( rhn->regs + RHINE_MII_RDWR );
76
DBGC ( rhn, "RHINE %p MII read timeout\n", rhn );
81
* Write to MII register
83
* @v mii MII interface
84
* @v reg Register address
85
* @v data Data to write
86
* @ret rc Return status code
88
static int rhine_mii_write ( struct mii_interface *mii, unsigned int reg,
90
struct rhine_nic *rhn = container_of ( mii, struct rhine_nic, mii );
91
unsigned int timeout = RHINE_TIMEOUT_US;
94
DBGC2 ( rhn, "RHINE %p MII write reg %d data 0x%04x\n",
98
writeb ( reg, rhn->regs + RHINE_MII_ADDR );
99
writew ( data, rhn->regs + RHINE_MII_RDWR );
100
cr = readb ( rhn->regs + RHINE_MII_CR );
101
writeb ( ( cr | RHINE_MII_CR_WREN ), rhn->regs + RHINE_MII_CR );
103
/* Wait for write to complete */
104
while ( timeout-- ) {
106
cr = readb ( rhn->regs + RHINE_MII_CR );
107
if ( ! ( cr & RHINE_MII_CR_WREN ) )
111
DBGC ( rhn, "RHINE %p MII write timeout\n", rhn );
115
/** Rhine MII operations */
116
static struct mii_operations rhine_mii_operations = {
117
.read = rhine_mii_read,
118
.write = rhine_mii_write,
122
* Enable auto-polling
124
* @v rhn Rhine device
125
* @ret rc Return status code
127
* This is voodoo. There seems to be no documentation on exactly what
128
* we are waiting for, or why we have to do anything other than simply
129
* turn the feature on.
131
static int rhine_mii_autopoll ( struct rhine_nic *rhn ) {
132
unsigned int timeout = RHINE_TIMEOUT_US;
135
/* Initiate auto-polling */
136
writeb ( MII_BMSR, rhn->regs + RHINE_MII_ADDR );
137
writeb ( RHINE_MII_CR_AUTOPOLL, rhn->regs + RHINE_MII_CR );
139
/* Wait for auto-polling to complete */
140
while ( timeout-- ) {
142
addr = readb ( rhn->regs + RHINE_MII_ADDR );
143
if ( ! ( addr & RHINE_MII_ADDR_MDONE ) ) {
144
writeb ( ( MII_BMSR | RHINE_MII_ADDR_MSRCEN ),
145
rhn->regs + RHINE_MII_ADDR );
150
DBGC ( rhn, "RHINE %p MII auto-poll timeout\n", rhn );
154
/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */
164
* @v rhn Rhine device
165
* @ret rc Return status code
167
* We're using PIO because this might reset the MMIO enable bit.
169
static int rhine_reset ( struct rhine_nic *rhn ) {
170
unsigned int timeout = RHINE_TIMEOUT_US;
173
DBGC ( rhn, "RHINE %p reset\n", rhn );
176
outb ( RHINE_CR1_RESET, rhn->ioaddr + RHINE_CR1 );
178
/* Wait for reset to complete */
179
while ( timeout-- ) {
181
cr1 = inb ( rhn->ioaddr + RHINE_CR1 );
182
if ( ! ( cr1 & RHINE_CR1_RESET ) )
186
DBGC ( rhn, "RHINE %p reset timeout\n", rhn );
191
* Enable MMIO register access
193
* @v rhn Rhine device
194
* @v revision Card revision
196
static void rhine_enable_mmio ( struct rhine_nic *rhn, int revision ) {
199
if ( revision < RHINE_REVISION_OLD ) {
200
conf = inb ( rhn->ioaddr + RHINE_CHIPCFG_A );
201
outb ( ( conf | RHINE_CHIPCFG_A_MMIO ),
202
rhn->ioaddr + RHINE_CHIPCFG_A );
204
conf = inb ( rhn->ioaddr + RHINE_CHIPCFG_D );
205
outb ( ( conf | RHINE_CHIPCFG_D_MMIO ),
206
rhn->ioaddr + RHINE_CHIPCFG_D );
211
* Reload EEPROM contents
213
* @v rhn Rhine device
214
* @ret rc Return status code
216
* We're using PIO because this might reset the MMIO enable bit.
218
static int rhine_reload_eeprom ( struct rhine_nic *rhn ) {
219
unsigned int timeout = RHINE_TIMEOUT_US;
222
/* Initiate reload */
223
eeprom = inb ( rhn->ioaddr + RHINE_EEPROM_CTRL );
224
outb ( ( eeprom | RHINE_EEPROM_CTRL_RELOAD ),
225
rhn->ioaddr + RHINE_EEPROM_CTRL );
227
/* Wait for reload to complete */
228
while ( timeout-- ) {
230
eeprom = inb ( rhn->ioaddr + RHINE_EEPROM_CTRL );
231
if ( ! ( eeprom & RHINE_EEPROM_CTRL_RELOAD ) )
235
DBGC ( rhn, "RHINE %p EEPROM reload timeout\n", rhn );
239
/******************************************************************************
 *
 * Link state
 *
 ******************************************************************************
 */
249
* @v netdev Network device
251
static void rhine_check_link ( struct net_device *netdev ) {
252
struct rhine_nic *rhn = netdev->priv;
255
/* Read MII status register */
256
mii_sr = readb ( rhn->regs + RHINE_MII_SR );
257
DBGC ( rhn, "RHINE %p link status %02x\n", rhn, mii_sr );
259
/* Report link state */
260
if ( ! ( mii_sr & RHINE_MII_SR_LINKPOLL ) ) {
261
netdev_link_up ( netdev );
262
} else if ( mii_sr & RHINE_MII_SR_PHYERR ) {
263
netdev_link_err ( netdev, -EIO );
265
netdev_link_down ( netdev );
269
/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */
277
* Create descriptor ring
279
* @v rhn Rhine device
280
* @v ring Descriptor ring
281
* @ret rc Return status code
283
static int rhine_create_ring ( struct rhine_nic *rhn,
284
struct rhine_ring *ring ) {
285
size_t len = ( ring->count * sizeof ( ring->desc[0] ) );
286
struct rhine_descriptor *next;
290
/* Allocate descriptors */
291
ring->desc = malloc_dma ( len, RHINE_RING_ALIGN );
295
/* Initialise descriptor ring */
296
memset ( ring->desc, 0, len );
297
for ( i = 0 ; i < ring->count ; i++ ) {
298
next = &ring->desc[ ( i + 1 ) % ring->count ];
299
ring->desc[i].next = cpu_to_le32 ( virt_to_bus ( next ) );
302
/* Program ring address */
303
address = virt_to_bus ( ring->desc );
304
writel ( address, rhn->regs + ring->reg );
306
DBGC ( rhn, "RHINE %p ring %02x is at [%08llx,%08llx)\n",
307
rhn, ring->reg, ( ( unsigned long long ) address ),
308
( ( unsigned long long ) address + len ) );
314
* Destroy descriptor ring
316
* @v rhn Rhine device
317
* @v ring Descriptor ring
319
static void rhine_destroy_ring ( struct rhine_nic *rhn,
320
struct rhine_ring *ring ) {
321
size_t len = ( ring->count * sizeof ( ring->desc[0] ) );
323
/* Clear ring address */
324
writel ( 0, rhn->regs + ring->reg );
326
/* Free descriptor ring */
327
free_dma ( ring->desc, len );
334
* Refill RX descriptor ring
336
* @v rhn Rhine device
338
static void rhine_refill_rx ( struct rhine_nic *rhn ) {
339
struct rhine_descriptor *desc;
340
struct io_buffer *iobuf;
344
while ( ( rhn->rx.prod - rhn->rx.cons ) < RHINE_RXDESC_NUM ) {
346
/* Allocate I/O buffer */
347
iobuf = alloc_iob ( RHINE_RX_MAX_LEN );
349
/* Wait for next refill */
353
/* Populate next receive descriptor */
354
rx_idx = ( rhn->rx.prod++ % RHINE_RXDESC_NUM );
355
desc = &rhn->rx.desc[rx_idx];
356
address = virt_to_bus ( iobuf->data );
357
desc->buffer = cpu_to_le32 ( address );
359
cpu_to_le32 ( RHINE_DES1_SIZE ( RHINE_RX_MAX_LEN - 1) |
360
RHINE_DES1_CHAIN | RHINE_DES1_IC );
362
desc->des0 = cpu_to_le32 ( RHINE_DES0_OWN );
364
/* Record I/O buffer */
365
rhn->rx_iobuf[rx_idx] = iobuf;
367
DBGC2 ( rhn, "RHINE %p RX %d is [%llx,%llx)\n", rhn, rx_idx,
368
( ( unsigned long long ) address ),
369
( ( unsigned long long ) address + RHINE_RX_MAX_LEN ) );
374
* Open network device
376
* @v netdev Network device
377
* @ret rc Return status code
379
static int rhine_open ( struct net_device *netdev ) {
380
struct rhine_nic *rhn = netdev->priv;
383
/* Create transmit ring */
384
if ( ( rc = rhine_create_ring ( rhn, &rhn->tx ) ) != 0 )
387
/* Create receive ring */
388
if ( ( rc = rhine_create_ring ( rhn, &rhn->rx ) ) != 0 )
391
/* Set receive configuration */
392
writeb ( ( RHINE_RCR_PHYS_ACCEPT | RHINE_RCR_BCAST_ACCEPT |
393
RHINE_RCR_RUNT_ACCEPT ), rhn->regs + RHINE_RCR );
395
/* Enable link status monitoring */
396
if ( ( rc = rhine_mii_autopoll ( rhn ) ) != 0 )
397
goto err_mii_autopoll;
399
/* Some cards need an extra delay(observed with VT6102) */
402
/* Enable RX/TX of packets */
403
writeb ( ( RHINE_CR0_STARTNIC | RHINE_CR0_RXEN | RHINE_CR0_TXEN ),
404
rhn->regs + RHINE_CR0 );
406
/* Enable auto polling and full duplex operation */
407
rhn->cr1 = RHINE_CR1_FDX;
408
writeb ( rhn->cr1, rhn->regs + RHINE_CR1 );
411
rhine_refill_rx ( rhn );
413
/* Update link state */
414
rhine_check_link ( netdev );
419
rhine_destroy_ring ( rhn, &rhn->rx );
421
rhine_destroy_ring ( rhn, &rhn->tx );
427
* Close network device
429
* @v netdev Network device
431
static void rhine_close ( struct net_device *netdev ) {
432
struct rhine_nic *rhn = netdev->priv;
435
/* Disable interrupts */
436
writeb ( 0, RHINE_IMR0 );
437
writeb ( 0, RHINE_IMR1 );
439
/* Stop card, clear RXON and TXON bits */
440
writeb ( RHINE_CR0_STOPNIC, rhn->regs + RHINE_CR0 );
442
/* Destroy receive ring */
443
rhine_destroy_ring ( rhn, &rhn->rx );
445
/* Discard any unused receive buffers */
446
for ( i = 0 ; i < RHINE_RXDESC_NUM ; i++ ) {
447
if ( rhn->rx_iobuf[i] )
448
free_iob ( rhn->rx_iobuf[i] );
449
rhn->rx_iobuf[i] = NULL;
452
/* Destroy transmit ring */
453
rhine_destroy_ring ( rhn, &rhn->tx );
459
* @v netdev Network device
460
* @v iobuf I/O buffer
461
* @ret rc Return status code
463
static int rhine_transmit ( struct net_device *netdev,
464
struct io_buffer *iobuf ) {
465
struct rhine_nic *rhn = netdev->priv;
466
struct rhine_descriptor *desc;
470
/* Get next transmit descriptor */
471
if ( ( rhn->tx.prod - rhn->tx.cons ) >= RHINE_TXDESC_NUM )
473
tx_idx = ( rhn->tx.prod++ % RHINE_TXDESC_NUM );
474
desc = &rhn->tx.desc[tx_idx];
476
/* Pad and align packet */
477
iob_pad ( iobuf, ETH_ZLEN );
478
address = virt_to_bus ( iobuf->data );
480
/* Populate transmit descriptor */
481
desc->buffer = cpu_to_le32 ( address );
482
desc->des1 = cpu_to_le32 ( RHINE_DES1_IC | RHINE_TDES1_STP |
483
RHINE_TDES1_EDP | RHINE_DES1_CHAIN |
484
RHINE_DES1_SIZE ( iob_len ( iobuf ) ) );
486
desc->des0 = cpu_to_le32 ( RHINE_DES0_OWN );
489
/* Notify card that there are packets ready to transmit */
490
writeb ( ( rhn->cr1 | RHINE_CR1_TXPOLL ), rhn->regs + RHINE_CR1 );
492
DBGC2 ( rhn, "RHINE %p TX %d is [%llx,%llx)\n", rhn, tx_idx,
493
( ( unsigned long long ) address ),
494
( ( unsigned long long ) address + iob_len ( iobuf ) ) );
500
* Poll for completed packets
502
* @v netdev Network device
504
static void rhine_poll_tx ( struct net_device *netdev ) {
505
struct rhine_nic *rhn = netdev->priv;
506
struct rhine_descriptor *desc;
510
/* Check for completed packets */
511
while ( rhn->tx.cons != rhn->tx.prod ) {
513
/* Get next transmit descriptor */
514
tx_idx = ( rhn->tx.cons % RHINE_TXDESC_NUM );
515
desc = &rhn->tx.desc[tx_idx];
517
/* Stop if descriptor is still in use */
518
if ( desc->des0 & cpu_to_le32 ( RHINE_DES0_OWN ) )
521
/* Complete TX descriptor */
522
des0 = le32_to_cpu ( desc->des0 );
523
if ( des0 & RHINE_TDES0_TERR ) {
524
DBGC ( rhn, "RHINE %p TX %d error (DES0 %08x)\n",
526
netdev_tx_complete_next_err ( netdev, -EIO );
528
DBGC2 ( rhn, "RHINE %p TX %d complete\n", rhn, tx_idx );
529
netdev_tx_complete_next ( netdev );
536
* Poll for received packets
538
* @v netdev Network device
540
static void rhine_poll_rx ( struct net_device *netdev ) {
541
struct rhine_nic *rhn = netdev->priv;
542
struct rhine_descriptor *desc;
543
struct io_buffer *iobuf;
548
/* Check for received packets */
549
while ( rhn->rx.cons != rhn->rx.prod ) {
551
/* Get next receive descriptor */
552
rx_idx = ( rhn->rx.cons % RHINE_RXDESC_NUM );
553
desc = &rhn->rx.desc[rx_idx];
555
/* Stop if descriptor is still in use */
556
if ( desc->des0 & cpu_to_le32 ( RHINE_DES0_OWN ) )
559
/* Populate I/O buffer */
560
iobuf = rhn->rx_iobuf[rx_idx];
561
rhn->rx_iobuf[rx_idx] = NULL;
562
des0 = le32_to_cpu ( desc->des0 );
563
len = ( RHINE_DES0_GETSIZE ( des0 ) - 4 /* strip CRC */ );
564
iob_put ( iobuf, len );
566
/* Hand off to network stack */
567
if ( des0 & RHINE_RDES0_RXOK ) {
568
DBGC2 ( rhn, "RHINE %p RX %d complete (length %zd)\n",
570
netdev_rx ( netdev, iobuf );
572
DBGC ( rhn, "RHINE %p RX %d error (length %zd, DES0 "
573
"%08x)\n", rhn, rx_idx, len, des0 );
574
netdev_rx_err ( netdev, iobuf, -EIO );
581
* Poll for completed and received packets
583
* @v netdev Network device
585
static void rhine_poll ( struct net_device *netdev ) {
586
struct rhine_nic *rhn = netdev->priv;
590
/* Read and acknowledge interrupts */
591
isr0 = readb ( rhn->regs + RHINE_ISR0 );
592
isr1 = readb ( rhn->regs + RHINE_ISR1 );
594
writeb ( isr0, rhn->regs + RHINE_ISR0 );
596
writeb ( isr1, rhn->regs + RHINE_ISR1 );
598
/* Report unexpected errors */
599
if ( ( isr0 & ( RHINE_ISR0_MIBOVFL | RHINE_ISR0_PCIERR |
600
RHINE_ISR0_RXRINGERR | RHINE_ISR0_TXRINGERR ) ) ||
601
( isr1 & ( RHINE_ISR1_GPI | RHINE_ISR1_TXABORT |
602
RHINE_ISR1_RXFIFOOVFL | RHINE_ISR1_RXFIFOUNFL |
603
RHINE_ISR1_TXFIFOUNFL ) ) ) {
604
DBGC ( rhn, "RHINE %p unexpected ISR0 %02x ISR1 %02x\n",
606
/* Report as a TX error */
607
netdev_tx_err ( netdev, NULL, -EIO );
610
/* Poll for TX completions, if applicable */
611
if ( isr0 & ( RHINE_ISR0_TXDONE | RHINE_ISR0_TXERR ) )
612
rhine_poll_tx ( netdev );
614
/* Poll for RX completions, if applicable */
615
if ( isr0 & ( RHINE_ISR0_RXDONE | RHINE_ISR0_RXERR ) )
616
rhine_poll_rx ( netdev );
618
/* Handle RX buffer exhaustion */
619
if ( isr1 & RHINE_ISR1_RXNOBUF ) {
620
rhine_poll_rx ( netdev );
621
netdev_rx_err ( netdev, NULL, -ENOBUFS );
624
/* Check link state, if applicable */
625
if ( isr1 & RHINE_ISR1_PORTSTATE )
626
rhine_check_link ( netdev );
629
rhine_refill_rx ( rhn );
633
* Enable or disable interrupts
635
* @v netdev Network device
636
* @v enable Interrupts should be enabled
638
static void rhine_irq ( struct net_device *netdev, int enable ) {
639
struct rhine_nic *nic = netdev->priv;
642
/* Enable interrupts */
643
writeb ( 0xff, nic->regs + RHINE_IMR0 );
644
writeb ( 0xff, nic->regs + RHINE_IMR1 );
646
/* Disable interrupts */
647
writeb ( 0, nic->regs + RHINE_IMR0 );
648
writeb ( 0, nic->regs + RHINE_IMR1 );
652
/** Rhine network device operations */
653
static struct net_device_operations rhine_operations = {
655
.close = rhine_close,
656
.transmit = rhine_transmit,
661
/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */
672
* @ret rc Return status code
674
static int rhine_probe ( struct pci_device *pci ) {
675
struct net_device *netdev;
676
struct rhine_nic *rhn;
681
/* Allocate and initialise net device */
682
netdev = alloc_etherdev ( sizeof ( *rhn ) );
687
netdev_init ( netdev, &rhine_operations );
689
pci_set_drvdata ( pci, netdev );
690
netdev->dev = &pci->dev;
691
memset ( rhn, 0, sizeof ( *rhn ) );
692
rhine_init_ring ( &rhn->tx, RHINE_TXDESC_NUM, RHINE_TXQUEUE_BASE );
693
rhine_init_ring ( &rhn->rx, RHINE_RXDESC_NUM, RHINE_RXQUEUE_BASE );
695
/* Fix up PCI device */
696
adjust_pci_device ( pci );
699
rhn->regs = ioremap ( pci->membase, RHINE_BAR_SIZE );
700
rhn->ioaddr = pci->ioaddr;
701
DBGC ( rhn, "RHINE %p regs at %08lx, I/O at %04lx\n", rhn,
702
pci->membase, pci->ioaddr );
705
if ( ( rc = rhine_reset ( rhn ) ) != 0 )
709
if ( ( rc = rhine_reload_eeprom ( rhn ) ) != 0 )
710
goto err_reload_eeprom;
712
/* Read card revision and enable MMIO */
713
pci_read_config_byte ( pci, PCI_REVISION, &revision );
714
DBGC ( rhn, "RHINE %p revision %#02x detected\n", rhn, revision );
715
rhine_enable_mmio ( rhn, revision );
717
/* Read MAC address */
718
for ( i = 0 ; i < ETH_ALEN ; i++ )
719
netdev->hw_addr[i] = readb ( rhn->regs + RHINE_MAC + i );
721
/* Initialise and reset MII interface */
722
mii_init ( &rhn->mii, &rhine_mii_operations );
723
if ( ( rc = mii_reset ( &rhn->mii ) ) != 0 ) {
724
DBGC ( rhn, "RHINE %p could not reset MII: %s\n",
725
rhn, strerror ( rc ) );
728
DBGC ( rhn, "RHINE PHY vendor %04x device %04x\n",
729
rhine_mii_read ( &rhn->mii, 0x02 ),
730
rhine_mii_read ( &rhn->mii, 0x03 ) );
732
/* Register network device */
733
if ( ( rc = register_netdev ( netdev ) ) != 0 )
734
goto err_register_netdev;
736
/* Set initial link state */
737
rhine_check_link ( netdev );
746
netdev_nullify ( netdev );
747
netdev_put ( netdev );
757
static void rhine_remove ( struct pci_device *pci ) {
758
struct net_device *netdev = pci_get_drvdata ( pci );
759
struct rhine_nic *nic = netdev->priv;
761
/* Unregister network device */
762
unregister_netdev ( netdev );
767
/* Free network device */
768
netdev_nullify ( netdev );
769
netdev_put ( netdev );
772
/** Rhine PCI device IDs */
773
static struct pci_device_id rhine_nics[] = {
774
PCI_ROM ( 0x1106, 0x3065, "dlink-530tx", "VIA VT6102", 0 ),
775
PCI_ROM ( 0x1106, 0x3106, "vt6105", "VIA VT6105", 0 ),
776
PCI_ROM ( 0x1106, 0x3043, "dlink-530tx-old", "VIA VT3043", 0 ),
777
PCI_ROM ( 0x1106, 0x3053, "vt6105m", "VIA VT6105M", 0 ),
778
PCI_ROM ( 0x1106, 0x6100, "via-rhine-old", "VIA 86C100A", 0 )
781
/** Rhine PCI driver */
782
struct pci_driver rhine_driver __pci_driver = {
784
.id_count = ( sizeof ( rhine_nics ) / sizeof ( rhine_nics[0] ) ),
785
.probe = rhine_probe,
786
.remove = rhine_remove,