/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007 IC Plus Corp
 *
 * Sundance Technology, Inc.
 * craig_rich@sundanceti.com
 *
 * http://www.icplus.com.tw
 * sorbica@icplus.com.tw
 *
 * http://www.icplus.com.tw
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>
35
#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36
#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37
#define IPG_RESET_MASK \
38
(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39
IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
42
#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43
#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44
#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
46
#define ipg_r32(reg) ioread32(ioaddr + (reg))
47
#define ipg_r16(reg) ioread16(ioaddr + (reg))
48
#define ipg_r8(reg) ioread8(ioaddr + (reg))
55
#define DRV_NAME "ipg"
57
MODULE_AUTHOR("IC Plus Corp. 2003");
58
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59
MODULE_LICENSE("GPL");
64
#define IPG_MAX_RXFRAME_SIZE 0x0600
65
#define IPG_RXFRAG_SIZE 0x0600
66
#define IPG_RXSUPPORT_SIZE 0x0600
67
#define IPG_IS_JUMBO false
70
* Variable record -- indexed by leading revision/length
71
* Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
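*
* Illustrative decoding (a sketch based on the format above): the leading
* word (0x4100 | (07 * 4)) denotes PHY revision 0x41 with 7 register/value
* pairs (a 28 byte record); the address/value pairs that follow are written
* to the PHY in order by ipg_set_phy_default_param().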
73
static const unsigned short DefaultPhyParam[] = {
74
/* 11/12/03 IP1000A v1-3 rev=0x40 */
75
/*--------------------------------------------------------------------------
76
(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77
27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78
31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79
--------------------------------------------------------------------------*/
80
/* 12/17/03 IP1000A v1-4 rev=0x40 */
81
(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
83
30, 0x005e, 9, 0x0700,
84
/* 01/09/04 IP1000A v1-5 rev=0x41 */
85
(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
87
30, 0x005e, 9, 0x0700,
91
static const char * const ipg_brand_name[] = {
92
"IC PLUS IP1000 1000/100/10 based NIC",
93
"Sundance Technology ST2021 based NIC",
94
"Tamarack Microelectronics TC9020/9021 based NIC",
98
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
99
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101
{ PCI_VDEVICE(DLINK, 0x9021), 2 },
102
{ PCI_VDEVICE(DLINK, 0x4020), 3 },
106
MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
108
static inline void __iomem *ipg_ioaddr(struct net_device *dev)
110
struct ipg_nic_private *sp = netdev_priv(dev);
115
static void ipg_dump_rfdlist(struct net_device *dev)
117
struct ipg_nic_private *sp = netdev_priv(dev);
118
void __iomem *ioaddr = sp->ioaddr;
122
IPG_DEBUG_MSG("_dump_rfdlist\n");
124
netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125
netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126
netdev_info(dev, "RFDList start address = %016lx\n",
127
(unsigned long)sp->rxd_map);
128
netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129
ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
131
for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132
offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133
netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134
i, offset, (unsigned long)sp->rxd[i].next_desc);
135
offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136
netdev_info(dev, "%02x %04x RFS = %016lx\n",
137
i, offset, (unsigned long)sp->rxd[i].rfs);
138
offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139
netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140
i, offset, (unsigned long)sp->rxd[i].frag_info);
144
static void ipg_dump_tfdlist(struct net_device *dev)
146
struct ipg_nic_private *sp = netdev_priv(dev);
147
void __iomem *ioaddr = sp->ioaddr;
151
IPG_DEBUG_MSG("_dump_tfdlist\n");
153
netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154
netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155
netdev_info(dev, "TFDList start address = %016lx\n",
156
(unsigned long) sp->txd_map);
157
netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158
ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
160
for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161
offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162
netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163
i, offset, (unsigned long)sp->txd[i].next_desc);
165
offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166
netdev_info(dev, "%02x %04x TFC = %016lx\n",
167
i, offset, (unsigned long) sp->txd[i].tfc);
168
offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169
netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170
i, offset, (unsigned long) sp->txd[i].frag_info);
175
static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
177
ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178
ndelay(IPG_PC_PHYCTRLWAIT_NS);
181
static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
183
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
187
static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
189
phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
191
ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
194
static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
196
ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197
phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
200
static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
204
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
206
bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
208
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
214
* Read a register from the Physical Layer device located
215
* on the IPG NIC, using the IPG PHYCTRL register.
217
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
219
void __iomem *ioaddr = ipg_ioaddr(dev);
221
* The GMII management frame structure for a read is as follows:
223
* |Preamble|st|op|phyad|regad|ta| data |idle|
224
* |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
226
* <32 1s> = 32 consecutive logic 1 values
227
* A = bit of Physical Layer device address (MSB first)
228
* R = bit of register address (MSB first)
229
* z = High impedance state
230
* D = bit of read data (MSB first)
232
* Transmission order is 'Preamble' field first, bits transmitted
233
* left to right (first to last).
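*
* Illustrative example (hypothetical values): a read of register 0x02 on
* the PHY at address 0x01 therefore clocks out, MSB first,
*   32 x '1' (Preamble), 01 (ST), 10 (read OP),
*   00001 (PHYAD = 1), 00010 (REGAD = 2),
* after which the MAC releases the data line so the PHY can drive the TA
* and the 16 DATA bits back.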
239
{ GMII_PREAMBLE, 32 }, /* Preamble */
240
{ GMII_ST, 2 }, /* ST */
241
{ GMII_READ, 2 }, /* OP */
242
{ phy_id, 5 }, /* PHYAD */
243
{ phy_reg, 5 }, /* REGAD */
244
{ 0x0000, 2 }, /* TA */
245
{ 0x0000, 16 }, /* DATA */
246
{ 0x0000, 1 } /* IDLE */
251
polarity = ipg_r8(PHY_CTRL);
252
polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
254
/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
255
for (j = 0; j < 5; j++) {
256
for (i = 0; i < p[j].len; i++) {
257
/* For each variable length field, the MSB must be
258
* transmitted first. Rotate through the field bits,
259
* starting with the MSB, and move each bit into the
260
* 1st (2^1) bit position (this is the bit position
261
* corresponding to the MgmtData bit of the PhyCtrl
262
* register for the IPG).
266
* First write a '0' to bit 1 of the PhyCtrl
267
* register, then write a '1' to bit 1 of the
270
* To do this, right shift the MSB of ST by the value:
271
* [field length - 1 - #ST bits already written]
272
* then left shift this result by 1.
274
data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275
data &= IPG_PC_MGMTDATA;
276
data |= polarity | IPG_PC_MGMTDIR;
278
ipg_drive_phy_ctl_low_high(ioaddr, data);
282
send_three_state(ioaddr, polarity);
284
read_phy_bit(ioaddr, polarity);
287
* For a read cycle, the bits for the next two fields (TA and
288
* DATA) are driven by the PHY (the IPG reads these bits).
290
for (i = 0; i < p[6].len; i++) {
292
(read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
295
send_three_state(ioaddr, polarity);
296
send_three_state(ioaddr, polarity);
297
send_three_state(ioaddr, polarity);
298
send_end(ioaddr, polarity);
300
/* Return the value of the DATA field. */
305
* Write to a register of the Physical Layer device located
306
* on the IPG NIC, using the IPG PHYCTRL register.
308
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
310
void __iomem *ioaddr = ipg_ioaddr(dev);
312
* The GMII management frame structure for a write is as follows:
314
* |Preamble|st|op|phyad|regad|ta| data |idle|
315
* |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
317
* <32 1s> = 32 consecutive logic 1 values
318
* A = bit of Physical Layer device address (MSB first)
319
* R = bit of register address (MSB first)
320
* z = High impedance state
321
* D = bit of write data (MSB first)
323
* Transmission order is 'Preamble' field first, bits transmitted
324
* left to right (first to last).
330
{ GMII_PREAMBLE, 32 }, /* Preamble */
331
{ GMII_ST, 2 }, /* ST */
332
{ GMII_WRITE, 2 }, /* OP */
333
{ phy_id, 5 }, /* PHYAD */
334
{ phy_reg, 5 }, /* REGAD */
335
{ 0x0002, 2 }, /* TA */
336
{ val & 0xffff, 16 }, /* DATA */
337
{ 0x0000, 1 } /* IDLE */
342
polarity = ipg_r8(PHY_CTRL);
343
polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
345
/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
346
for (j = 0; j < 7; j++) {
347
for (i = 0; i < p[j].len; i++) {
348
/* For each variable length field, the MSB must be
349
* transmitted first. Rotate through the field bits,
350
* starting with the MSB, and move each bit into the
351
* 1st (2^1) bit position (this is the bit position
352
* corresponding to the MgmtData bit of the PhyCtrl
353
* register for the IPG).
357
* First write a '0' to bit 1 of the PhyCtrl
358
* register, then write a '1' to bit 1 of the
361
* To do this, right shift the MSB of ST by the value:
362
* [field length - 1 - #ST bits already written]
363
* then left shift this result by 1.
365
data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366
data &= IPG_PC_MGMTDATA;
367
data |= polarity | IPG_PC_MGMTDIR;
369
ipg_drive_phy_ctl_low_high(ioaddr, data);
373
/* The last cycle is a tri-state, so read from the PHY. */
374
for (j = 7; j < 8; j++) {
375
for (i = 0; i < p[j].len; i++) {
376
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
378
p[j].field |= ((ipg_r8(PHY_CTRL) &
379
IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
381
ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
386
static void ipg_set_led_mode(struct net_device *dev)
388
struct ipg_nic_private *sp = netdev_priv(dev);
389
void __iomem *ioaddr = sp->ioaddr;
392
mode = ipg_r32(ASIC_CTRL);
393
mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
395
if ((sp->led_mode & 0x03) > 1)
396
mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
398
if ((sp->led_mode & 0x01) == 1)
399
mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
401
if ((sp->led_mode & 0x08) == 8)
402
mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
404
ipg_w32(mode, ASIC_CTRL);
407
static void ipg_set_phy_set(struct net_device *dev)
409
struct ipg_nic_private *sp = netdev_priv(dev);
410
void __iomem *ioaddr = sp->ioaddr;
413
physet = ipg_r8(PHY_SET);
414
physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
415
physet |= ((sp->led_mode & 0x70) >> 4);
416
ipg_w8(physet, PHY_SET);
419
static int ipg_reset(struct net_device *dev, u32 resetflags)
421
/* Assert functional resets via the IPG AsicCtrl
422
* register as specified by the 'resetflags' input
425
void __iomem *ioaddr = ipg_ioaddr(dev);
426
unsigned int timeout_count = 0;
428
IPG_DEBUG_MSG("_reset\n");
430
ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
432
/* Delay added to account for problem with 10Mbps reset. */
433
mdelay(IPG_AC_RESETWAIT);
435
while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
436
mdelay(IPG_AC_RESETWAIT);
437
if (++timeout_count > IPG_AC_RESET_TIMEOUT)
440
/* Set LED Mode in Asic Control */
441
ipg_set_led_mode(dev);
443
/* Set PHYSet Register Value */
444
ipg_set_phy_set(dev);
448
/* Find the GMII PHY address. */
449
static int ipg_find_phyaddr(struct net_device *dev)
451
unsigned int phyaddr, i;
453
for (i = 0; i < 32; i++) {
456
/* Search for the correct PHY address among 32 possible. */
457
phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
459
/* 10/22/03 Grace change verify from GMII_PHY_STATUS to
463
status = mdio_read(dev, phyaddr, MII_BMSR);
465
if ((status != 0xFFFF) && (status != 0))
473
* Configure IPG based on result of IEEE 802.3 PHY
476
static int ipg_config_autoneg(struct net_device *dev)
478
struct ipg_nic_private *sp = netdev_priv(dev);
479
void __iomem *ioaddr = sp->ioaddr;
480
unsigned int txflowcontrol;
481
unsigned int rxflowcontrol;
482
unsigned int fullduplex;
491
IPG_DEBUG_MSG("_config_autoneg\n");
493
asicctrl = ipg_r32(ASIC_CTRL);
494
phyctrl = ipg_r8(PHY_CTRL);
495
mac_ctrl_val = ipg_r32(MAC_CTRL);
497
/* Set flags for use in resolving auto-negotiation, assuming
498
* non-1000Mbps, half duplex, no flow control.
504
/* To accommodate a problem in 10Mbps operation,
505
* set a global flag if the PHY is running in 10Mbps mode.
509
/* Determine actual speed of operation. */
510
switch (phyctrl & IPG_PC_LINK_SPEED) {
511
case IPG_PC_LINK_SPEED_10MBPS:
515
case IPG_PC_LINK_SPEED_100MBPS:
518
case IPG_PC_LINK_SPEED_1000MBPS:
522
speed = "undefined!";
526
netdev_info(dev, "Link speed = %s\n", speed);
527
if (sp->tenmbpsmode == 1)
528
netdev_info(dev, "10Mbps operational mode enabled\n");
530
if (phyctrl & IPG_PC_DUPLEX_STATUS) {
536
/* Configure full duplex, and flow control. */
537
if (fullduplex == 1) {
539
/* Configure IPG for full duplex operation. */
543
mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
545
if (txflowcontrol == 1) {
547
mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
550
mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
553
if (rxflowcontrol == 1) {
555
mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
558
mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
564
mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
565
~IPG_MC_TX_FLOW_CONTROL_ENABLE &
566
~IPG_MC_RX_FLOW_CONTROL_ENABLE);
569
netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
570
duplex, tx_desc, rx_desc);
571
ipg_w32(mac_ctrl_val, MAC_CTRL);
576
/* Determine and configure multicast operation and set
577
* receive mode for IPG.
579
static void ipg_nic_set_multicast_list(struct net_device *dev)
581
void __iomem *ioaddr = ipg_ioaddr(dev);
582
struct netdev_hw_addr *ha;
583
unsigned int hashindex;
587
IPG_DEBUG_MSG("_nic_set_multicast_list\n");
589
receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
591
if (dev->flags & IFF_PROMISC) {
592
/* NIC to be configured in promiscuous mode. */
593
receivemode = IPG_RM_RECEIVEALLFRAMES;
594
} else if ((dev->flags & IFF_ALLMULTI) ||
595
((dev->flags & IFF_MULTICAST) &&
596
(netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
597
/* NIC to be configured to receive all multicast
599
receivemode |= IPG_RM_RECEIVEMULTICAST;
600
} else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
601
/* NIC to be configured to receive selected
602
* multicast addresses. */
603
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
606
/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
607
* The IPG applies a cyclic-redundancy-check (the same CRC
608
* used to calculate the frame data FCS) to the destination
609
* address of all incoming multicast frames whose destination
610
* address has the multicast bit set. The least significant
611
* 6 bits of the CRC result are used as an addressing index
612
* into the hash table. If the value of the bit addressed by
613
* this index is a 1, the frame is passed to the host system.
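*
* Illustrative example (hypothetical value): a CRC whose six least
* significant bits are 100101b gives hashindex 37; set_bit() below then
* sets bit 37 of the 64 bit table, which on a little-endian host is bit 5
* of hashtable[1], and matching frames are accepted.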
616
/* Clear hashtable. */
617
hashtable[0] = 0x00000000;
618
hashtable[1] = 0x00000000;
620
/* Cycle through all multicast addresses to filter. */
621
netdev_for_each_mc_addr(ha, dev) {
622
/* Calculate CRC result for each multicast address. */
623
hashindex = crc32_le(0xffffffff, ha->addr,
626
/* Use only the least significant 6 bits. */
627
hashindex = hashindex & 0x3F;
629
/* Within "hashtable", set bit number "hashindex"
632
set_bit(hashindex, (void *)hashtable);
635
/* Write the value of the hashtable to the two 32 bit
 * HASHTABLE IPG registers.
638
ipg_w32(hashtable[0], HASHTABLE_0);
639
ipg_w32(hashtable[1], HASHTABLE_1);
641
ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
643
IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
646
static int ipg_io_config(struct net_device *dev)
648
struct ipg_nic_private *sp = netdev_priv(dev);
649
void __iomem *ioaddr = ipg_ioaddr(dev);
653
IPG_DEBUG_MSG("_io_config\n");
655
origmacctrl = ipg_r32(MAC_CTRL);
657
restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
659
/* Based on compilation option, determine if FCS is to be
660
* stripped on receive frames by IPG.
662
if (!IPG_STRIP_FCS_ON_RX)
663
restoremacctrl |= IPG_MC_RCV_FCS;
665
/* Determine if transmitter and/or receiver are
666
* enabled so we may restore MACCTRL correctly.
668
if (origmacctrl & IPG_MC_TX_ENABLED)
669
restoremacctrl |= IPG_MC_TX_ENABLE;
671
if (origmacctrl & IPG_MC_RX_ENABLED)
672
restoremacctrl |= IPG_MC_RX_ENABLE;
674
/* Transmitter and receiver must be disabled before setting
677
ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
678
IPG_MC_RSVD_MASK, MAC_CTRL);
680
/* Now that transmitter and receiver are disabled, write
683
ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
685
/* Set RECEIVEMODE register. */
686
ipg_nic_set_multicast_list(dev);
688
ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
690
ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
691
ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
692
ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
693
ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
694
ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
695
ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
696
ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
697
IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
698
IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
699
IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
700
ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
701
ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
703
/* IPG multi-frag frame bug workaround.
704
* Per silicon revision B3 errata.
706
ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
708
/* IPG TX poll now bug workaround.
709
* Per silicon revision B3 errata.
711
ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
713
/* IPG RX poll now bug workaround.
714
* Per silicon revision B3 errata.
716
ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
718
/* Now restore MACCTRL to original setting. */
719
ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
721
/* Disable unused RMON statistics. */
722
ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
724
/* Disable unused MIB statistics. */
725
ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
726
IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
727
IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
728
IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
729
IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
730
IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
736
* Create a receive buffer within system memory and update
737
* NIC private structure appropriately.
739
static int ipg_get_rxbuff(struct net_device *dev, int entry)
741
struct ipg_nic_private *sp = netdev_priv(dev);
742
struct ipg_rx *rxfd = sp->rxd + entry;
746
IPG_DEBUG_MSG("_get_rxbuff\n");
748
skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
750
sp->rx_buff[entry] = NULL;
754
/* Associate the receive buffer with the IPG NIC. */
757
/* Save the address of the sk_buff structure. */
758
sp->rx_buff[entry] = skb;
760
rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
761
sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
763
/* Set the RFD fragment length. */
764
rxfragsize = sp->rxfrag_size;
765
rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
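/* Illustrative example: with the default rxfrag_size of 0x0600, the
 * length field in bits 48 and up of frag_info is set to 0x0600, telling
 * the IPG how many bytes it may DMA into this fragment.
 */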
770
static int init_rfdlist(struct net_device *dev)
772
struct ipg_nic_private *sp = netdev_priv(dev);
773
void __iomem *ioaddr = sp->ioaddr;
776
IPG_DEBUG_MSG("_init_rfdlist\n");
778
for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
779
struct ipg_rx *rxfd = sp->rxd + i;
781
if (sp->rx_buff[i]) {
782
pci_unmap_single(sp->pdev,
783
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
784
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
785
dev_kfree_skb_irq(sp->rx_buff[i]);
786
sp->rx_buff[i] = NULL;
789
/* Clear out the RFS field. */
790
rxfd->rfs = 0x0000000000000000;
792
if (ipg_get_rxbuff(dev, i) < 0) {
794
* A receive buffer was not ready, break the
797
IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
799
/* Just in case we cannot allocate a single RFD.
803
netdev_err(dev, "No memory available for RFD list\n");
808
rxfd->next_desc = cpu_to_le64(sp->rxd_map +
809
sizeof(struct ipg_rx)*(i + 1));
811
sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
816
/* Write the location of the RFDList to the IPG. */
817
ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
818
ipg_w32(0x00000000, RFD_LIST_PTR_1);
823
static void init_tfdlist(struct net_device *dev)
825
struct ipg_nic_private *sp = netdev_priv(dev);
826
void __iomem *ioaddr = sp->ioaddr;
829
IPG_DEBUG_MSG("_init_tfdlist\n");
831
for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
832
struct ipg_tx *txfd = sp->txd + i;
834
txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
836
if (sp->tx_buff[i]) {
837
dev_kfree_skb_irq(sp->tx_buff[i]);
838
sp->tx_buff[i] = NULL;
841
txfd->next_desc = cpu_to_le64(sp->txd_map +
842
sizeof(struct ipg_tx)*(i + 1));
844
sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
849
/* Write the location of the TFDList to the IPG. */
850
IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
852
ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
853
ipg_w32(0x00000000, TFD_LIST_PTR_1);
855
sp->reset_current_tfd = 1;
859
* Free all transmit buffers which have already been transferred
860
* via DMA to the IPG.
862
static void ipg_nic_txfree(struct net_device *dev)
864
struct ipg_nic_private *sp = netdev_priv(dev);
865
unsigned int released, pending, dirty;
867
IPG_DEBUG_MSG("_nic_txfree\n");
869
pending = sp->tx_current - sp->tx_dirty;
870
dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
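/* Note: tx_current and tx_dirty are free-running counters, so the
 * unsigned subtraction above gives the number of outstanding TFDs even
 * after wrap-around; 'dirty' is the ring index derived from tx_dirty
 * modulo the list length.
 */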
872
for (released = 0; released < pending; released++) {
873
struct sk_buff *skb = sp->tx_buff[dirty];
874
struct ipg_tx *txfd = sp->txd + dirty;
876
IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
878
/* Look at each TFD's TFC field beginning
879
* at the last freed TFD up to the current TFD.
880
* If the TFDDone bit is set, free the associated
883
if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
886
/* Free the transmit buffer. */
888
pci_unmap_single(sp->pdev,
889
le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
890
skb->len, PCI_DMA_TODEVICE);
892
dev_kfree_skb_irq(skb);
894
sp->tx_buff[dirty] = NULL;
896
dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
899
sp->tx_dirty += released;
901
if (netif_queue_stopped(dev) &&
902
(sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
903
netif_wake_queue(dev);
907
static void ipg_tx_timeout(struct net_device *dev)
909
struct ipg_nic_private *sp = netdev_priv(dev);
910
void __iomem *ioaddr = sp->ioaddr;
912
ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
915
spin_lock_irq(&sp->lock);
917
/* Re-configure after DMA reset. */
918
if (ipg_io_config(dev) < 0)
919
netdev_info(dev, "Error during re-configuration\n");
923
spin_unlock_irq(&sp->lock);
925
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
930
* For TxComplete interrupts, free all transmit
931
* buffers which have already been transferred via DMA
934
static void ipg_nic_txcleanup(struct net_device *dev)
936
struct ipg_nic_private *sp = netdev_priv(dev);
937
void __iomem *ioaddr = sp->ioaddr;
940
IPG_DEBUG_MSG("_nic_txcleanup\n");
942
for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
943
/* Reading the TXSTATUS register clears the
944
* TX_COMPLETE interrupt.
946
u32 txstatusdword = ipg_r32(TX_STATUS);
948
IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
950
/* Check for Transmit errors. Error bits only valid if
951
* TX_COMPLETE bit in the TXSTATUS register is a 1.
953
if (!(txstatusdword & IPG_TS_TX_COMPLETE))
956
/* If in 10Mbps mode, indicate transmit is ready. */
957
if (sp->tenmbpsmode) {
958
netif_wake_queue(dev);
961
/* Transmit error, increment stat counters. */
962
if (txstatusdword & IPG_TS_TX_ERROR) {
963
IPG_DEBUG_MSG("Transmit error\n");
964
sp->stats.tx_errors++;
967
/* Late collision, re-enable transmitter. */
968
if (txstatusdword & IPG_TS_LATE_COLLISION) {
969
IPG_DEBUG_MSG("Late collision on transmit\n");
970
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
971
IPG_MC_RSVD_MASK, MAC_CTRL);
974
/* Maximum collisions, re-enable transmitter. */
975
if (txstatusdword & IPG_TS_TX_MAX_COLL) {
976
IPG_DEBUG_MSG("Maximum collisions on transmit\n");
977
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
978
IPG_MC_RSVD_MASK, MAC_CTRL);
981
/* Transmit underrun, reset and re-enable
984
if (txstatusdword & IPG_TS_TX_UNDERRUN) {
985
IPG_DEBUG_MSG("Transmitter underrun\n");
986
sp->stats.tx_fifo_errors++;
987
ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
988
IPG_AC_NETWORK | IPG_AC_FIFO);
990
/* Re-configure after DMA reset. */
991
if (ipg_io_config(dev) < 0) {
992
netdev_info(dev, "Error during re-configuration\n");
996
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
997
IPG_MC_RSVD_MASK, MAC_CTRL);
1001
ipg_nic_txfree(dev);
1004
/* Provides statistical information about the IPG NIC. */
1005
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
1007
struct ipg_nic_private *sp = netdev_priv(dev);
1008
void __iomem *ioaddr = sp->ioaddr;
1012
IPG_DEBUG_MSG("_nic_get_stats\n");
1014
/* Check to see if the NIC has been initialized via nic_open,
1015
* before trying to read statistic registers.
1017
if (!test_bit(__LINK_STATE_START, &dev->state))
1020
sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1021
sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1022
sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1023
sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1024
temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1025
sp->stats.rx_errors += temp1;
1026
sp->stats.rx_missed_errors += temp1;
1027
temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1028
ipg_r32(IPG_LATECOLLISIONS);
1029
temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1030
sp->stats.collisions += temp1;
1031
sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1032
sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1033
ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1034
sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1036
/* detailed tx_errors */
1037
sp->stats.tx_carrier_errors += temp2;
1039
/* detailed rx_errors */
1040
sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1041
ipg_r16(IPG_FRAMETOOLONGERRRORS);
1042
sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1044
/* Unutilized IPG statistic registers. */
1045
ipg_r32(IPG_MCSTFRAMESRCVDOK);
1050
/* Restore used receive buffers. */
1051
static int ipg_nic_rxrestore(struct net_device *dev)
1053
struct ipg_nic_private *sp = netdev_priv(dev);
1054
const unsigned int curr = sp->rx_current;
1055
unsigned int dirty = sp->rx_dirty;
1057
IPG_DEBUG_MSG("_nic_rxrestore\n");
1059
for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1060
unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1062
/* rx_copybreak may poke hole here and there. */
1063
if (sp->rx_buff[entry])
1066
/* Generate a new receive buffer to replace the
1067
* current buffer (which will be released by the
1070
if (ipg_get_rxbuff(dev, entry) < 0) {
1071
IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1076
/* Reset the RFS field. */
1077
sp->rxd[entry].rfs = 0x0000000000000000;
1079
sp->rx_dirty = dirty;
1084
/* jumboindex and jumbosize track the state of jumbo frame reception.
 * The initial state is jumboindex = -1 and jumbosize = 0.
 * 1. jumboindex = -1 and jumbosize = 0:   the previous jumbo frame has completed.
 * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is being received and is not oversized.
 * 3. jumboindex = -1 and jumbosize != 0:  the jumbo frame is oversized; the data received so
 *    far has been dropped and the remainder of the frame must still be received and discarded.
1097
FRAME_NO_START_NO_END = 0,
1098
FRAME_WITH_START = 1,
1099
FRAME_WITH_END = 10,
1100
FRAME_WITH_START_WITH_END = 11
1103
static void ipg_nic_rx_free_skb(struct net_device *dev)
1105
struct ipg_nic_private *sp = netdev_priv(dev);
1106
unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1108
if (sp->rx_buff[entry]) {
1109
struct ipg_rx *rxfd = sp->rxd + entry;
1111
pci_unmap_single(sp->pdev,
1112
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1113
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1114
dev_kfree_skb_irq(sp->rx_buff[entry]);
1115
sp->rx_buff[entry] = NULL;
1119
static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1121
struct ipg_nic_private *sp = netdev_priv(dev);
1122
struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1123
int type = FRAME_NO_START_NO_END;
1125
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1126
type += FRAME_WITH_START;
1127
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1128
type += FRAME_WITH_END;
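/* The additions map directly onto the enum above: for example, a
 * descriptor with both FrameStart and FrameEnd set yields 1 + 10 = 11,
 * i.e. FRAME_WITH_START_WITH_END.
 */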
1132
static int ipg_nic_rx_check_error(struct net_device *dev)
1134
struct ipg_nic_private *sp = netdev_priv(dev);
1135
unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1136
struct ipg_rx *rxfd = sp->rxd + entry;
1138
if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1139
(IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1140
IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1141
IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1142
IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1143
(unsigned long) rxfd->rfs);
1145
/* Increment general receive error statistic. */
1146
sp->stats.rx_errors++;
1148
/* Increment detailed receive error statistics. */
1149
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1150
IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1152
sp->stats.rx_fifo_errors++;
1155
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1156
IPG_DEBUG_MSG("RX runt occurred\n");
1157
sp->stats.rx_length_errors++;
1160
/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1161
* error count handled by an IPG statistic register.
1164
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1165
IPG_DEBUG_MSG("RX alignment error occurred\n");
1166
sp->stats.rx_frame_errors++;
1169
/* Do nothing for IPG_RFS_RXFCSERROR, error count
1170
* handled by an IPG statistic register.
1173
/* Free the memory associated with the RX
1174
* buffer since it is erroneous and we will
1175
* not pass it to higher layer processes.
1177
if (sp->rx_buff[entry]) {
1178
pci_unmap_single(sp->pdev,
1179
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1180
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1182
dev_kfree_skb_irq(sp->rx_buff[entry]);
1183
sp->rx_buff[entry] = NULL;
1185
return ERROR_PACKET;
1187
return NORMAL_PACKET;
1190
static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1191
struct ipg_nic_private *sp,
1192
struct ipg_rx *rxfd, unsigned entry)
1194
struct ipg_jumbo *jumbo = &sp->jumbo;
1195
struct sk_buff *skb;
1198
if (jumbo->found_start) {
1199
dev_kfree_skb_irq(jumbo->skb);
1200
jumbo->found_start = 0;
1201
jumbo->current_size = 0;
1205
/* 1: found error, 0 no error */
1206
if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1209
skb = sp->rx_buff[entry];
1213
/* accept this frame and send to upper layer */
1214
framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1215
if (framelen > sp->rxfrag_size)
1216
framelen = sp->rxfrag_size;
1218
skb_put(skb, framelen);
1219
skb->protocol = eth_type_trans(skb, dev);
1220
skb_checksum_none_assert(skb);
1222
sp->rx_buff[entry] = NULL;
1225
static void ipg_nic_rx_with_start(struct net_device *dev,
1226
struct ipg_nic_private *sp,
1227
struct ipg_rx *rxfd, unsigned entry)
1229
struct ipg_jumbo *jumbo = &sp->jumbo;
1230
struct pci_dev *pdev = sp->pdev;
1231
struct sk_buff *skb;
1233
/* 1: found error, 0 no error */
1234
if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1237
/* accept this frame and send to upper layer */
1238
skb = sp->rx_buff[entry];
1242
if (jumbo->found_start)
1243
dev_kfree_skb_irq(jumbo->skb);
1245
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1246
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1248
skb_put(skb, sp->rxfrag_size);
1250
jumbo->found_start = 1;
1251
jumbo->current_size = sp->rxfrag_size;
1254
sp->rx_buff[entry] = NULL;
1257
static void ipg_nic_rx_with_end(struct net_device *dev,
1258
struct ipg_nic_private *sp,
1259
struct ipg_rx *rxfd, unsigned entry)
1261
struct ipg_jumbo *jumbo = &sp->jumbo;
1263
/* 1: found error, 0 no error */
1264
if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1265
struct sk_buff *skb = sp->rx_buff[entry];
1270
if (jumbo->found_start) {
1271
int framelen, endframelen;
1273
framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1275
endframelen = framelen - jumbo->current_size;
1276
if (framelen > sp->rxsupport_size)
1277
dev_kfree_skb_irq(jumbo->skb);
1279
memcpy(skb_put(jumbo->skb, endframelen),
1280
skb->data, endframelen);
1282
jumbo->skb->protocol =
1283
eth_type_trans(jumbo->skb, dev);
1285
skb_checksum_none_assert(jumbo->skb);
1286
netif_rx(jumbo->skb);
1290
jumbo->found_start = 0;
1291
jumbo->current_size = 0;
1294
ipg_nic_rx_free_skb(dev);
1296
dev_kfree_skb_irq(jumbo->skb);
1297
jumbo->found_start = 0;
1298
jumbo->current_size = 0;
1303
static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1304
struct ipg_nic_private *sp,
1305
struct ipg_rx *rxfd, unsigned entry)
1307
struct ipg_jumbo *jumbo = &sp->jumbo;
1309
/* 1: found error, 0 no error */
1310
if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1311
struct sk_buff *skb = sp->rx_buff[entry];
1314
if (jumbo->found_start) {
1315
jumbo->current_size += sp->rxfrag_size;
1316
if (jumbo->current_size <= sp->rxsupport_size) {
1317
memcpy(skb_put(jumbo->skb,
1319
skb->data, sp->rxfrag_size);
1322
ipg_nic_rx_free_skb(dev);
1325
dev_kfree_skb_irq(jumbo->skb);
1326
jumbo->found_start = 0;
1327
jumbo->current_size = 0;
1332
static int ipg_nic_rx_jumbo(struct net_device *dev)
1334
struct ipg_nic_private *sp = netdev_priv(dev);
1335
unsigned int curr = sp->rx_current;
1336
void __iomem *ioaddr = sp->ioaddr;
1339
IPG_DEBUG_MSG("_nic_rx\n");
1341
for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1342
unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1343
struct ipg_rx *rxfd = sp->rxd + entry;
1345
if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1348
switch (ipg_nic_rx_check_frame_type(dev)) {
1349
case FRAME_WITH_START_WITH_END:
1350
ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1352
case FRAME_WITH_START:
1353
ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1355
case FRAME_WITH_END:
1356
ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1358
case FRAME_NO_START_NO_END:
1359
ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1364
sp->rx_current = curr;
1366
if (i == IPG_MAXRFDPROCESS_COUNT) {
1367
/* There are more RFDs to process, however the
1368
* allocated amount of RFD processing time has
1369
* expired. Assert Interrupt Requested to make
1370
* sure we come back to process the remaining RFDs.
1372
ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1375
ipg_nic_rxrestore(dev);
1380
static int ipg_nic_rx(struct net_device *dev)
1382
/* Transfer received Ethernet frames to higher network layers. */
1383
struct ipg_nic_private *sp = netdev_priv(dev);
1384
unsigned int curr = sp->rx_current;
1385
void __iomem *ioaddr = sp->ioaddr;
1386
struct ipg_rx *rxfd;
1389
IPG_DEBUG_MSG("_nic_rx\n");
1391
#define __RFS_MASK \
1392
cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
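/* A frame is handled by this path only when RFDDone, FrameStart and
 * FrameEnd are all set in its RFD, i.e. the whole frame fits in a single
 * RFD.  Frames spanning several RFDs are discarded further below; they
 * are only reassembled by the jumbo receive path (ipg_nic_rx_jumbo).
 */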
1394
for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1395
unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1396
struct sk_buff *skb = sp->rx_buff[entry];
1397
unsigned int framelen;
1399
rxfd = sp->rxd + entry;
1401
if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1404
/* Get received frame length. */
1405
framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1407
/* Check for jumbo frame arrival with too small
1410
if (framelen > sp->rxfrag_size) {
1412
("RFS FrameLen > allocated fragment size\n");
1414
framelen = sp->rxfrag_size;
1417
if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1418
(IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1419
IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1420
IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1422
IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1423
(unsigned long int) rxfd->rfs);
1425
/* Increment general receive error statistic. */
1426
sp->stats.rx_errors++;
1428
/* Increment detailed receive error statistics. */
1429
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1430
IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1431
sp->stats.rx_fifo_errors++;
1434
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1435
IPG_DEBUG_MSG("RX runt occurred\n");
1436
sp->stats.rx_length_errors++;
1439
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ;
1440
/* Do nothing, error count handled by an IPG
1441
* statistic register.
1444
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1445
IPG_DEBUG_MSG("RX alignment error occurred\n");
1446
sp->stats.rx_frame_errors++;
1449
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ;
1450
/* Do nothing, error count handled by an IPG
1451
* statistic register.
1454
/* Free the memory associated with the RX
1455
* buffer since it is erroneous and we will
1456
* not pass it to higher layer processes.
1459
__le64 info = rxfd->frag_info;
1461
pci_unmap_single(sp->pdev,
1462
le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1463
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1465
dev_kfree_skb_irq(skb);
1469
/* Adjust the new buffer length to accommodate the size
1470
* of the received frame.
1472
skb_put(skb, framelen);
1474
/* Set the buffer's protocol field to Ethernet. */
1475
skb->protocol = eth_type_trans(skb, dev);
1477
/* The IPG encountered an error with (or
1478
* there were no) IP/TCP/UDP checksums.
1479
* This may or may not indicate an invalid
1480
* IP/TCP/UDP frame was received. Let the
1481
* upper layer decide.
1483
skb_checksum_none_assert(skb);
1485
/* Hand off frame for higher layer processing.
1486
* The function netif_rx() releases the sk_buff
1487
* when processing completes.
1492
/* Assure RX buffer is not reused by IPG. */
1493
sp->rx_buff[entry] = NULL;
1497
* If there are more RFDs to process and the allocated amount of RFD
1498
* processing time has expired, assert Interrupt Requested to make
1499
* sure we come back to process the remaining RFDs.
1501
if (i == IPG_MAXRFDPROCESS_COUNT)
1502
ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1505
/* Check if the RFD list contained no receive frame data. */
1507
sp->EmptyRFDListCount++;
1509
while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1510
!((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1511
(le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1512
unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1514
rxfd = sp->rxd + entry;
1516
IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1518
/* An unexpected event, additional code needed to handle
1519
* properly. So for the time being, just disregard the
1523
/* Free the memory associated with the RX
1524
* buffer since it is erroneous and we will
1525
* not pass it to higher layer processes.
1527
if (sp->rx_buff[entry]) {
1528
pci_unmap_single(sp->pdev,
1529
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1530
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1531
dev_kfree_skb_irq(sp->rx_buff[entry]);
1534
/* Assure RX buffer is not reused by IPG. */
1535
sp->rx_buff[entry] = NULL;
1538
sp->rx_current = curr;
1540
/* Check to see if there are a minimum number of used
1541
* RFDs before restoring any (should improve performance.)
1543
if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1544
ipg_nic_rxrestore(dev);
1549
static void ipg_reset_after_host_error(struct work_struct *work)
1551
struct ipg_nic_private *sp =
1552
container_of(work, struct ipg_nic_private, task.work);
1553
struct net_device *dev = sp->dev;
1556
* Acknowledge HostError interrupt by resetting
1559
ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1564
if (ipg_io_config(dev) < 0) {
1565
netdev_info(dev, "Cannot recover from PCI error\n");
1566
schedule_delayed_work(&sp->task, HZ);
1570
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1572
struct net_device *dev = dev_inst;
1573
struct ipg_nic_private *sp = netdev_priv(dev);
1574
void __iomem *ioaddr = sp->ioaddr;
1575
unsigned int handled = 0;
1578
IPG_DEBUG_MSG("_interrupt_handler\n");
1581
ipg_nic_rxrestore(dev);
1583
spin_lock(&sp->lock);
1585
/* Get interrupt source information, and acknowledge
1586
* some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1587
* IntRequested, MacControlFrame, LinkEvent) interrupts
1588
* if issued. Also, all IPG interrupts are disabled by
1589
* reading IntStatusAck.
1591
status = ipg_r16(INT_STATUS_ACK);
1593
IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1595
/* Shared IRQ or remove event. */
1596
if (!(status & IPG_IS_RSVD_MASK))
1601
if (unlikely(!netif_running(dev)))
1604
/* If RFDListEnd interrupt, restore all used RFDs. */
1605
if (status & IPG_IS_RFD_LIST_END) {
1606
IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1608
/* The RFD list end indicates an RFD was encountered
1609
* with a 0 NextPtr, or with an RFDDone bit set to 1
1610
* (indicating the RFD is not ready for use by the
1611
* IPG.) Try to restore all RFDs.
1613
ipg_nic_rxrestore(dev);
1616
/* Increment the RFDlistendCount counter. */
1617
sp->RFDlistendCount++;
1621
/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1622
* IntRequested interrupt, process received frames. */
1623
if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1624
(status & IPG_IS_RFD_LIST_END) ||
1625
(status & IPG_IS_RX_DMA_COMPLETE) ||
1626
(status & IPG_IS_INT_REQUESTED)) {
1628
/* Increment the RFD list checked counter if interrupted
1629
* only to check the RFD list. */
1630
if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1631
IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1632
(IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1633
IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1634
IPG_IS_UPDATE_STATS)))
1635
sp->RFDListCheckedCount++;
1639
ipg_nic_rx_jumbo(dev);
1644
/* If TxDMAComplete interrupt, free used TFDs. */
1645
if (status & IPG_IS_TX_DMA_COMPLETE)
1646
ipg_nic_txfree(dev);
1648
/* TxComplete interrupts indicate one of numerous actions.
1649
* Determine what action to take based on TXSTATUS register.
1651
if (status & IPG_IS_TX_COMPLETE)
1652
ipg_nic_txcleanup(dev);
1654
/* If UpdateStats interrupt, update Linux Ethernet statistics */
1655
if (status & IPG_IS_UPDATE_STATS)
1656
ipg_nic_get_stats(dev);
1658
/* If HostError interrupt, reset IPG. */
1659
if (status & IPG_IS_HOST_ERROR) {
1660
IPG_DDEBUG_MSG("HostError Interrupt\n");
1662
schedule_delayed_work(&sp->task, 0);
1665
/* If LinkEvent interrupt, resolve autonegotiation. */
1666
if (status & IPG_IS_LINK_EVENT) {
1667
if (ipg_config_autoneg(dev) < 0)
1668
netdev_info(dev, "Auto-negotiation error\n");
1671
/* If MACCtrlFrame interrupt, do nothing. */
1672
if (status & IPG_IS_MAC_CTRL_FRAME)
1673
IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1675
/* If RxComplete interrupt, do nothing. */
1676
if (status & IPG_IS_RX_COMPLETE)
1677
IPG_DEBUG_MSG("RxComplete interrupt\n");
1679
/* If RxEarly interrupt, do nothing. */
1680
if (status & IPG_IS_RX_EARLY)
1681
IPG_DEBUG_MSG("RxEarly interrupt\n");
1684
/* Re-enable IPG interrupts. */
1685
ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1686
IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1687
IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1689
spin_unlock(&sp->lock);
1691
return IRQ_RETVAL(handled);
1694
static void ipg_rx_clear(struct ipg_nic_private *sp)
1698
for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1699
if (sp->rx_buff[i]) {
1700
struct ipg_rx *rxfd = sp->rxd + i;
1702
dev_kfree_skb_irq(sp->rx_buff[i]);
1703
sp->rx_buff[i] = NULL;
1704
pci_unmap_single(sp->pdev,
1705
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1706
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1711
static void ipg_tx_clear(struct ipg_nic_private *sp)
1715
for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1716
if (sp->tx_buff[i]) {
1717
struct ipg_tx *txfd = sp->txd + i;
1719
pci_unmap_single(sp->pdev,
1720
le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1721
sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1723
dev_kfree_skb_irq(sp->tx_buff[i]);
1725
sp->tx_buff[i] = NULL;
1730
static int ipg_nic_open(struct net_device *dev)
1732
struct ipg_nic_private *sp = netdev_priv(dev);
1733
void __iomem *ioaddr = sp->ioaddr;
1734
struct pci_dev *pdev = sp->pdev;
1737
IPG_DEBUG_MSG("_nic_open\n");
1739
sp->rx_buf_sz = sp->rxsupport_size;
1741
/* Check for interrupt line conflicts, and request interrupt
1744
* IMPORTANT: Disable IPG interrupts prior to registering
1747
ipg_w16(0x0000, INT_ENABLE);
1749
/* Register the interrupt line to be used by the IPG within
1752
rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1755
netdev_info(dev, "Error when requesting interrupt\n");
1759
dev->irq = pdev->irq;
1763
sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1764
&sp->rxd_map, GFP_KERNEL);
1766
goto err_free_irq_0;
1768
sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1769
&sp->txd_map, GFP_KERNEL);
1773
rc = init_rfdlist(dev);
1775
netdev_info(dev, "Error during configuration\n");
1781
rc = ipg_io_config(dev);
1783
netdev_info(dev, "Error during configuration\n");
1784
goto err_release_tfdlist_3;
1787
/* Resolve autonegotiation. */
1788
if (ipg_config_autoneg(dev) < 0)
1789
netdev_info(dev, "Auto-negotiation error\n");
1791
/* initialize JUMBO Frame control variable */
1792
sp->jumbo.found_start = 0;
1793
sp->jumbo.current_size = 0;
1794
sp->jumbo.skb = NULL;
1796
/* Enable transmit and receive operation of the IPG. */
1797
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1798
IPG_MC_RSVD_MASK, MAC_CTRL);
1800
netif_start_queue(dev);
1804
err_release_tfdlist_3:
1808
dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1810
dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1812
free_irq(pdev->irq, dev);
1816
static int ipg_nic_stop(struct net_device *dev)
1818
struct ipg_nic_private *sp = netdev_priv(dev);
1819
void __iomem *ioaddr = sp->ioaddr;
1820
struct pci_dev *pdev = sp->pdev;
1822
IPG_DEBUG_MSG("_nic_stop\n");
1824
netif_stop_queue(dev);
1826
IPG_DUMPTFDLIST(dev);
1829
(void) ipg_r16(INT_STATUS_ACK);
1831
ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1833
synchronize_irq(pdev->irq);
1834
} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1840
dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1843
free_irq(pdev->irq, dev);
1848
static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1849
struct net_device *dev)
1851
struct ipg_nic_private *sp = netdev_priv(dev);
1852
void __iomem *ioaddr = sp->ioaddr;
1853
unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1854
unsigned long flags;
1855
struct ipg_tx *txfd;
1857
IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1859
/* If in 10Mbps mode, stop the transmit queue so
1860
* no more transmit frames are accepted.
1862
if (sp->tenmbpsmode)
1863
netif_stop_queue(dev);
1865
if (sp->reset_current_tfd) {
1866
sp->reset_current_tfd = 0;
1870
txfd = sp->txd + entry;
1872
sp->tx_buff[entry] = skb;
1874
/* Clear all TFC fields, except TFDDONE. */
1875
txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1877
/* Specify the TFC field within the TFD. */
1878
txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1879
(IPG_TFC_FRAMEID & sp->tx_current) |
1880
(IPG_TFC_FRAGCOUNT & (1 << 24)));
1882
* 16--17 (WordAlign) <- 3 (disable),
1883
* 0--15 (FrameId) <- sp->tx_current,
1884
* 24--27 (FragCount) <- 1
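*
* Illustrative example (hypothetical value): with sp->tx_current == 5, the
* FrameId field (bits 0--15) of TFC holds 0x0005 and the FragCount field
* (bits 24--27) holds 1, since a single fragment is queued per frame.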
1887
/* Request TxComplete interrupts at an interval defined
1888
* by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1889
* Request TxComplete interrupt for every frame
1890
* if in 10Mbps mode to accommodate problem with 10Mbps
1893
if (sp->tenmbpsmode)
1894
txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1895
txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1896
/* Based on compilation option, determine if FCS is to be
1897
* appended to transmit frame by IPG.
1899
if (!(IPG_APPEND_FCS_ON_TX))
1900
txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1902
/* Based on compilation option, determine if IP, TCP and/or
1903
* UDP checksums are to be added to transmit frame by IPG.
1905
if (IPG_ADD_IPCHECKSUM_ON_TX)
1906
txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1908
if (IPG_ADD_TCPCHECKSUM_ON_TX)
1909
txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1911
if (IPG_ADD_UDPCHECKSUM_ON_TX)
1912
txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1914
/* Based on compilation option, determine if VLAN tag info is to be
1915
* inserted into transmit frame by IPG.
1917
if (IPG_INSERT_MANUAL_VLAN_TAG) {
1918
txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1919
((u64) IPG_MANUAL_VLAN_VID << 32) |
1920
((u64) IPG_MANUAL_VLAN_CFI << 44) |
1921
((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1924
/* The fragment start location within system memory is defined
 * by the sk_buff structure's data field. The DMA bus address of
 * this location is obtained below with pci_map_single().
1929
txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1930
skb->len, PCI_DMA_TODEVICE));
1932
/* The length of the fragment within system memory is defined by
1933
* the sk_buff structure's len field.
1935
txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1936
((u64) (skb->len & 0xffff) << 48));
1938
/* Clear the TFDDone bit last to indicate the TFD is ready
1939
* for transfer to the IPG.
1941
txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1943
spin_lock_irqsave(&sp->lock, flags);
1949
ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1951
if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1952
netif_stop_queue(dev);
1954
spin_unlock_irqrestore(&sp->lock, flags);
1956
return NETDEV_TX_OK;
1959
static void ipg_set_phy_default_param(unsigned char rev,
1960
struct net_device *dev, int phy_address)
1962
unsigned short length;
1963
unsigned char revision;
1964
const unsigned short *phy_param;
1965
unsigned short address, value;
1967
phy_param = &DefaultPhyParam[0];
1968
length = *phy_param & 0x00FF;
1969
revision = (unsigned char)((*phy_param) >> 8);
1971
while (length != 0) {
1972
if (rev == revision) {
1973
while (length > 1) {
1974
address = *phy_param;
1975
value = *(phy_param + 1);
1977
mdio_write(dev, phy_address, address, value);
1982
phy_param += length / 2;
1983
length = *phy_param & 0x00FF;
1984
revision = (unsigned char)((*phy_param) >> 8);
1990
static int read_eeprom(struct net_device *dev, int eep_addr)
1992
void __iomem *ioaddr = ipg_ioaddr(dev);
1997
value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1998
ipg_w16(value, EEPROM_CTRL);
2000
for (i = 0; i < 1000; i++) {
2004
data = ipg_r16(EEPROM_CTRL);
2005
if (!(data & IPG_EC_EEPROM_BUSY)) {
2006
ret = ipg_r16(EEPROM_DATA);
2013
static void ipg_init_mii(struct net_device *dev)
2015
struct ipg_nic_private *sp = netdev_priv(dev);
2016
struct mii_if_info *mii_if = &sp->mii_if;
2020
mii_if->mdio_read = mdio_read;
2021
mii_if->mdio_write = mdio_write;
2022
mii_if->phy_id_mask = 0x1f;
2023
mii_if->reg_num_mask = 0x1f;
2025
mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2027
if (phyaddr != 0x1f) {
2028
u16 mii_phyctrl, mii_1000cr;
2030
mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2031
mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2032
GMII_PHY_1000BASETCONTROL_PreferMaster;
2033
mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2035
mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2037
/* Set default phyparam */
2038
ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2041
mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2042
mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2047
static int ipg_hw_init(struct net_device *dev)
2049
struct ipg_nic_private *sp = netdev_priv(dev);
2050
void __iomem *ioaddr = sp->ioaddr;
2054
/* Read/Write and Reset EEPROM Value */
2055
/* Read LED Mode Configuration from EEPROM */
2056
sp->led_mode = read_eeprom(dev, 6);
2058
/* Reset all functions within the IPG. Do not assert
2059
* RST_OUT as not compatible with some PHYs.
2061
rc = ipg_reset(dev, IPG_RESET_MASK);
2067
/* Read MAC Address from EEPROM */
2068
for (i = 0; i < 3; i++)
2069
sp->station_addr[i] = read_eeprom(dev, 16 + i);
2071
for (i = 0; i < 3; i++)
2072
ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2074
/* Set station address in ethernet_device structure. */
2075
dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2076
dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2077
dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2078
dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2079
dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2080
dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
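/* Illustrative example (hypothetical values): if the three station
 * address registers read 0x2211, 0x4433 and 0x6655, the byte extraction
 * above yields the MAC address 11:22:33:44:55:66.
 */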
2085
static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2087
struct ipg_nic_private *sp = netdev_priv(dev);
2090
mutex_lock(&sp->mii_mutex);
2091
rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2092
mutex_unlock(&sp->mii_mutex);
2097
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2099
struct ipg_nic_private *sp = netdev_priv(dev);
2102
/* Function to accommodate changes to Maximum Transfer Unit
2103
* (or MTU) of IPG NIC. Cannot use default function since
2104
* the default will not allow for MTU > 1500 bytes.
2107
IPG_DEBUG_MSG("_nic_change_mtu\n");
2110
* Check that the new MTU value is between 68 (14 byte header, 46 byte
2111
* payload, 4 byte FCS) and 10 KB, which is the largest supported MTU.
2113
if (new_mtu < 68 || new_mtu > 10240)
2116
err = ipg_nic_stop(dev);
2122
sp->max_rxframe_size = new_mtu;
2124
sp->rxfrag_size = new_mtu;
2125
if (sp->rxfrag_size > 4088)
2126
sp->rxfrag_size = 4088;
2128
sp->rxsupport_size = sp->max_rxframe_size;
2130
if (new_mtu > 0x0600)
2131
sp->is_jumbo = true;
2133
sp->is_jumbo = false;
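/* Illustrative example (hypothetical value): new_mtu = 9000 gives
 * max_rxframe_size = rxsupport_size = 9000, rxfrag_size capped at 4088
 * (so a frame spans several RFDs) and is_jumbo = true.
 */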
2135
return ipg_nic_open(dev);
2138
static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2140
struct ipg_nic_private *sp = netdev_priv(dev);
2143
mutex_lock(&sp->mii_mutex);
2144
rc = mii_ethtool_gset(&sp->mii_if, cmd);
2145
mutex_unlock(&sp->mii_mutex);
2150
static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2152
struct ipg_nic_private *sp = netdev_priv(dev);
2155
mutex_lock(&sp->mii_mutex);
2156
rc = mii_ethtool_sset(&sp->mii_if, cmd);
2157
mutex_unlock(&sp->mii_mutex);
2162
static int ipg_nway_reset(struct net_device *dev)
2164
struct ipg_nic_private *sp = netdev_priv(dev);
2167
mutex_lock(&sp->mii_mutex);
2168
rc = mii_nway_restart(&sp->mii_if);
2169
mutex_unlock(&sp->mii_mutex);
2174
static const struct ethtool_ops ipg_ethtool_ops = {
2175
.get_settings = ipg_get_settings,
2176
.set_settings = ipg_set_settings,
2177
.nway_reset = ipg_nway_reset,
2180
static void __devexit ipg_remove(struct pci_dev *pdev)
2182
struct net_device *dev = pci_get_drvdata(pdev);
2183
struct ipg_nic_private *sp = netdev_priv(dev);
2185
IPG_DEBUG_MSG("_remove\n");
2187
/* Un-register Ethernet device. */
2188
unregister_netdev(dev);
2190
pci_iounmap(pdev, sp->ioaddr);
2192
pci_release_regions(pdev);
2195
pci_disable_device(pdev);
2196
pci_set_drvdata(pdev, NULL);
2199
static const struct net_device_ops ipg_netdev_ops = {
2200
.ndo_open = ipg_nic_open,
2201
.ndo_stop = ipg_nic_stop,
2202
.ndo_start_xmit = ipg_nic_hard_start_xmit,
2203
.ndo_get_stats = ipg_nic_get_stats,
2204
.ndo_set_rx_mode = ipg_nic_set_multicast_list,
2205
.ndo_do_ioctl = ipg_ioctl,
2206
.ndo_tx_timeout = ipg_tx_timeout,
2207
.ndo_change_mtu = ipg_nic_change_mtu,
2208
.ndo_set_mac_address = eth_mac_addr,
2209
.ndo_validate_addr = eth_validate_addr,
2212
static int __devinit ipg_probe(struct pci_dev *pdev,
2213
const struct pci_device_id *id)
2215
unsigned int i = id->driver_data;
2216
struct ipg_nic_private *sp;
2217
struct net_device *dev;
2218
void __iomem *ioaddr;
2221
rc = pci_enable_device(pdev);
2225
pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2227
pci_set_master(pdev);
2229
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2231
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2233
pr_err("%s: DMA config failed\n", pci_name(pdev));
2239
* Initialize net device.
2241
dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2243
pr_err("%s: alloc_etherdev failed\n", pci_name(pdev));
2248
sp = netdev_priv(dev);
2249
spin_lock_init(&sp->lock);
2250
mutex_init(&sp->mii_mutex);
2252
sp->is_jumbo = IPG_IS_JUMBO;
2253
sp->rxfrag_size = IPG_RXFRAG_SIZE;
2254
sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2255
sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2257
/* Declare IPG NIC functions for Ethernet device methods.
2259
dev->netdev_ops = &ipg_netdev_ops;
2260
SET_NETDEV_DEV(dev, &pdev->dev);
2261
SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
2263
rc = pci_request_regions(pdev, DRV_NAME);
2265
goto err_free_dev_1;
2267
ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2269
pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2271
goto err_release_regions_2;
2274
/* Save the pointer to the PCI device information. */
2275
sp->ioaddr = ioaddr;
2279
INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2281
pci_set_drvdata(pdev, dev);
2283
rc = ipg_hw_init(dev);
2287
rc = register_netdev(dev);
2291
netdev_info(dev, "Ethernet device registered\n");
2296
pci_iounmap(pdev, ioaddr);
2297
err_release_regions_2:
2298
pci_release_regions(pdev);
2302
pci_disable_device(pdev);
2306
static struct pci_driver ipg_pci_driver = {
2307
.name = IPG_DRIVER_NAME,
2308
.id_table = ipg_pci_tbl,
2310
.remove = __devexit_p(ipg_remove),
2313
static int __init ipg_init_module(void)
2315
return pci_register_driver(&ipg_pci_driver);
2318
static void __exit ipg_exit_module(void)
2320
pci_unregister_driver(&ipg_pci_driver);
2323
module_init(ipg_init_module);
2324
module_exit(ipg_exit_module);