/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * This driver is a port of the b44 Linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some SSB bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <byteswap.h>
#include <mii.h>
#include <ipxe/io.h>
#include <ipxe/timer.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include "b44.h"

static void b44_set_rx_mode(struct net_device *netdev);
static int b44_phy_reset(struct b44_private *bp);
static struct net_device_operations b44_operations;

static inline int ring_next(int index)
{
        /* B44_RING_SIZE is a power of 2 :) */
        return (index + 1) & (B44_RING_SIZE - 1);
}

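/*
 * Note: because B44_RING_SIZE is a power of two, the AND mask above behaves
 * like a modulo and wraps the last ring index back to 0 (for example, if
 * B44_RING_SIZE were 512, ring_next(511) would return 0).
 */
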
/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
        writel(val, bp->regs + reg);
}

static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
        readl(bp->regs + reg);
        udelay(timeout);
}

#define VIRT_TO_B44(addr) ( virt_to_bus(addr) + SB_PCI_DMA )

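/*
 * The chip's DMA engine sees host memory through a window based at
 * SB_PCI_DMA (from the driver's header), so descriptor and buffer addresses
 * are programmed as the PCI bus address plus SB_PCI_DMA.  Only buffers whose
 * bus address fits within B44_30BIT_DMA_MASK are reachable; b44_address_ok()
 * below checks this before a buffer is handed to the hardware.
 */
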
/**
 * Check if card can access address
 *
 * @v address		Virtual address
 * @ret address_ok	Card can access address
 */
static inline __attribute__ (( always_inline )) int
b44_address_ok ( void *address ) {

        /* Card can address anything with a 30-bit address */
        if ( ( virt_to_bus ( address ) & ~B44_30BIT_DMA_MASK ) == 0 )
                return 1;

        return 0;
}

/**
 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMATX_STAT);
        pending &= DMATX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}

/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMARX_STAT);
        pending &= DMARX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}

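/*
 * Note: the "current descriptor" fields of DMATX_STAT/DMARX_STAT report a
 * byte offset into the descriptor ring, so dividing by
 * sizeof(struct dma_desc) in the two helpers above yields the index of the
 * descriptor that the hardware will process next.
 */
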
/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
                        unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;

                if (!clear && (val & bit))
                        break;

                udelay(10);
        }

        if (i == timeout)
                return -ENODEV;

        return 0;
}

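/*
 * Note: with clear == 1 the helper above waits for the bit to read as zero;
 * with clear == 0 it waits for the bit to read as one.  The timeout is a
 * number of polling iterations, not an absolute time.
 */
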
/**
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specifications.
 */

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static inline int ssb_is_core_up(struct b44_private *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
                               BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

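/*
 * Note: SSB_BAR0_WIN is a window register in PCI configuration space that
 * selects which SSB core's registers appear behind BAR0.  The helper above
 * temporarily points the window at the PCI bridge core so that the Ethernet
 * core's interrupt can be routed (B44_SBINTVEC) and prefetch/burst enabled,
 * then restores the original window so the MAC registers are visible again.
 */
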
static void ssb_core_disable(struct b44_private *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                SSB_CORE_DOWN));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
        bflush(bp, B44_SBTMSLOW, 1);
}

static void ssb_core_reset(struct b44_private *bp)
{
        u32 val;
        const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

        ssb_core_disable(bp);

        bw32(bp, B44_SBTMSLOW, mask);
        bflush(bp, B44_SBTMSLOW, 1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_BAD)) {
                bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
        }

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        bflush(bp, B44_SBTMSLOW, 1);
}

/*
 * Driver helper functions
 */

/**
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * that case.
 *
 * Called-by: b44_close, b44_halt, b44_init_hw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);

                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);

                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

                bw32(bp, B44_DMATX_CTRL, 0);

                bp->tx_dirty = bp->tx_cur = 0;

                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);

                bw32(bp, B44_DMARX_CTRL, 0);

                bp->rx_cur = 0;
        } else {
                ssb_pci_setup(bp, SBINTVEC_ENET0);
        }

        ssb_core_reset(bp);

        /* Don't enable PHY if we are only doing a partial reset. */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL,
             (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
        bflush(bp, B44_MDIO_CTRL, 1);

        /* Enable internal or external PHY */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                bflush(bp, B44_ENET_CTRL, 1);
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        bflush(bp, B44_DEVCTRL, 100);
                }
        }
}

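/*
 * Note: a partial reset (B44_CHIP_RESET_PARTIAL) stops DMA and resets the
 * SSB core but deliberately skips the MDIO/PHY re-enable above; b44_halt()
 * uses it so that error recovery does not spend time bringing the PHY back
 * up.
 */
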
/**
 * Called by b44_poll in the error path
 */
static void b44_halt(struct b44_private *bp)
{
        /* Disable interrupts */
        bw32(bp, B44_IMASK, 0);
        bflush(bp, B44_IMASK, 1);

        DBG("b44: powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

        /*
         * Now reset the chip, but without enabling
         * the MAC & PHY part of it.
         * This has to be done _after_ we shut down the PHY.
         */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/**
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
        u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        b44_set_rx_mode(bp->netdev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
                bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
                bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

/*** Management of ring descriptors ***/

static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
        struct rx_header *rh;
        u32 ctrl, addr;

        rh = bp->rx_iobuf[idx]->data;
        rh->len = 0;
        rh->flags = 0;

        ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
        if (idx == B44_RING_LAST) {
                ctrl |= DESC_CTRL_EOT;
        }
        addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

        bp->rx[idx].ctrl = cpu_to_le32(ctrl);
        bp->rx[idx].addr = cpu_to_le32(addr);
        bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}

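/*
 * Note on buffer layout: each RX buffer begins with a struct rx_header that
 * the hardware writes back (frame length and status flags), followed by the
 * packet data at RX_PKT_OFFSET.  DESC_CTRL_EOT on the last descriptor tells
 * the DMA engine to wrap back to the start of the ring.
 */
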
/**
 * Refill RX ring descriptors with buffers. This is needed
 * because during RX we pass ownership of descriptor buffers to
 * the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
        struct io_buffer *iobuf;
        u32 i;

        for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
                if (bp->rx_iobuf[i] != NULL)
                        continue;

                iobuf = alloc_iob(RX_PKT_BUF_SZ);
                if (!iobuf) {
                        DBG("Refill rx ring failed!!\n");
                        break;
                }
                if (!b44_address_ok(iobuf->data)) {
                        DBG("Refill rx ring bad address!!\n");
                        free_iob(iobuf);
                        break;
                }

                bp->rx_iobuf[i] = iobuf;

                b44_populate_rx_descriptor(bp, i);
        }
}

static void b44_free_rx_ring(struct b44_private *bp)
{
        u32 i;

        if (bp->rx) {
                for (i = 0; i < B44_RING_SIZE; i++) {
                        free_iob(bp->rx_iobuf[i]);
                        bp->rx_iobuf[i] = NULL;
                }
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                bp->rx = NULL;
        }
}

static int b44_init_rx_ring(struct b44_private *bp)
{
        b44_free_rx_ring(bp);

        bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->rx)
                return -ENOMEM;
        if (!b44_address_ok(bp->rx)) {
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                return -ENOTSUP;
        }

        memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

        bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
        b44_populate_rx_descriptor(bp, 0);
        b44_rx_refill(bp, 0);

        DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
        return 0;
}

static void b44_free_tx_ring(struct b44_private *bp)
{
        if (bp->tx) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                bp->tx = NULL;
        }
}

static int b44_init_tx_ring(struct b44_private *bp)
{
        b44_free_tx_ring(bp);

        bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->tx)
                return -ENOMEM;
        if (!b44_address_ok(bp->tx)) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                return -ENOTSUP;
        }

        memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
        memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

        DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
        return 0;
}

/*** Interaction with the PHY ***/

static int b44_phy_read(struct b44_private *bp, int reg, u32 * val)
{
        int err;

        u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 argv = arg1 | arg2 | arg3 | arg4;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
        u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 arg5 = (val & MDIO_DATA_DATA);
        u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static int b44_phy_reset(struct b44_private *bp)
{
        u32 val;
        int err;

        err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;

        udelay(100);
        err = b44_phy_read(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        /* PHY did not come out of reset */
                        err = -ENODEV;
                }
        }

        return err;
}

/**
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * addresses used for filtering incoming packets.
 */
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
                          unsigned int index)
{
        u32 val;

        val = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);

        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) | (((u32) data[1]) << 0));

        bw32(bp, B44_CAM_DATA_HI, val);

        val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
        bw32(bp, B44_CAM_CTRL, val);

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

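/*
 * Note: as the writes above show, CAM_DATA_LO carries the four low MAC
 * address bytes (data[2]..data[5]) and CAM_DATA_HI carries the two high
 * bytes plus the "entry valid" flag; CAM_CTRL then commits the entry at the
 * requested index.
 */
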
static void b44_set_mac_addr(struct b44_private *bp)
{
        u32 val;
        bw32(bp, B44_CAM_CTRL, 0);
        b44_cam_write(bp, bp->netdev->ll_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}

/* Read 128 bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 * data)
{
        int i;
        u16 *ptr = (u16 *) data;

        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}

static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
        u8 eeprom[128];

        /* Load MAC address, note byteswapping */
        b44_read_eeprom(bp, &eeprom[0]);
        bp->netdev->hw_addr[0] = eeprom[79];
        bp->netdev->hw_addr[1] = eeprom[78];
        bp->netdev->hw_addr[2] = eeprom[81];
        bp->netdev->hw_addr[3] = eeprom[80];
        bp->netdev->hw_addr[4] = eeprom[83];
        bp->netdev->hw_addr[5] = eeprom[82];

        /* Load PHY address */
        bp->phy_addr = eeprom[90] & 0x1f;
}

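/*
 * Note on the byte swapping above: the EEPROM is read 16 bits at a time
 * (see b44_read_eeprom), so each pair of MAC address bytes ends up swapped
 * in the byte array; hence the 79/78, 81/80, 83/82 index order.
 */
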
static void b44_set_rx_mode(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
        u32 val;
        int i;

        val = br32(bp, B44_RXCONFIG);
        val &= ~RXCONFIG_PROMISC;
        val |= RXCONFIG_ALLMULTI;

        b44_set_mac_addr(bp);

        for (i = 1; i < 64; i++)
                b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}

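/*
 * Note: the receive filter set up above disables promiscuous mode, accepts
 * all multicast frames, programs the unicast MAC address into CAM entry 0
 * and wipes the remaining 63 CAM entries.
 */
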
/*** Implementation of iPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci	PCI device
 * @ret rc	Return status code
 */
static int b44_probe(struct pci_device *pci)
{
        struct net_device *netdev;
        struct b44_private *bp;
        int rc;

        netdev = alloc_etherdev(sizeof(*bp));
        if (!netdev)
                return -ENOMEM;

        netdev_init(netdev, &b44_operations);
        pci_set_drvdata(pci, netdev);
        netdev->dev = &pci->dev;

        /* Set up private data */
        bp = netdev_priv(netdev);
        memset(bp, 0, sizeof(*bp));
        bp->netdev = netdev;
        bp->pci = pci;

        /* Map device registers */
        bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
        if (!bp->regs) {
                netdev_put(netdev);
                return -ENOMEM;
        }

        /* Enable PCI bus mastering */
        adjust_pci_device(pci);

        b44_load_mac_and_phy_addr(bp);

        rc = register_netdev(netdev);
        if (rc != 0) {
                iounmap(bp->regs);
                netdev_nullify(netdev);
                netdev_put(netdev);
                return rc;
        }

        /* Link management currently not implemented */
        netdev_link_up(netdev);

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);

        DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", pci->id->name,
            pci->id->vendor, pci->id->device, bp->regs,
            eth_ntoa(netdev->ll_addr));

        return 0;
}

static void b44_remove(struct pci_device *pci)
{
        struct net_device *netdev = pci_get_drvdata(pci);
        struct b44_private *bp = netdev_priv(netdev);

        ssb_core_disable(bp);
        unregister_netdev(netdev);
        iounmap(bp->regs);
        netdev_nullify(netdev);
        netdev_put(netdev);
}

/** Enable or disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
        struct b44_private *bp = netdev_priv(netdev);

        /* Interrupt mask specifies which events generate interrupts */
        bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}

/** Open network device
 *
 * @v netdev	Network device
 * @ret rc	Return status code
 */
static int b44_open(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        int rc;

        rc = b44_init_tx_ring(bp);
        if (rc != 0)
                return rc;

        rc = b44_init_rx_ring(bp);
        if (rc != 0)
                return rc;

        b44_init_hw(bp, B44_FULL_RESET);

        /* Disable interrupts */
        b44_irq(netdev, 0);

        return 0;
}

/** Close network device
 *
 * @v netdev	Network device
 */
static void b44_close(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);

        b44_chip_reset(bp, B44_FULL_RESET);
        b44_free_tx_ring(bp);
        b44_free_rx_ring(bp);
}

/** Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 cur = bp->tx_cur;
        u32 ctrl;

        /* Check for TX ring overflow */
        if (bp->tx[cur].ctrl) {
                DBG("tx overflow\n");
                return -ENOBUFS;
        }

        /* Check for addressability */
        if (!b44_address_ok(iobuf->data))
                return -ENOTSUP;

        /* Will call netdev_tx_complete() on the iobuf later */
        bp->tx_iobuf[cur] = iobuf;

        /* Set up TX descriptor */
        ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
               DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

        if (cur == B44_RING_LAST)
                ctrl |= DESC_CTRL_EOT;

        bp->tx[cur].ctrl = cpu_to_le32(ctrl);
        bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

        /* Update next available descriptor index */
        cur = ring_next(cur);
        bp->tx_cur = cur;
        wmb();

        /* Tell card that a new TX descriptor is ready */
        bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));

        return 0;
}

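/*
 * Note: each packet uses exactly one descriptor (DESC_CTRL_SOF and
 * DESC_CTRL_EOF are both set), and DESC_CTRL_IOC requests a completion
 * event that shows up as ISTAT_TX, which b44_poll() services via
 * b44_tx_complete().
 */
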
/** Recycles sent TX descriptors and notifies network stack
 *
 * Called from b44_poll
 */
static void b44_tx_complete(struct b44_private *bp)
{
        u32 cur, i;

        cur = pending_tx_index(bp);

        for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
                /* Free finished frame */
                netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
                bp->tx_iobuf[i] = NULL;

                /* Clear TX descriptor */
                bp->tx[i].ctrl = 0;
                bp->tx[i].addr = 0;
        }
        bp->tx_dirty = cur;
}

static void b44_process_rx_packets(struct b44_private *bp)
{
        struct io_buffer *iob;	/* received data */
        struct rx_header *rh;
        u16 len;
        u32 pending, i;

        pending = pending_rx_index(bp);

        for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
                iob = bp->rx_iobuf[i];
                if (iob == NULL)
                        break;

                rh = iob->data;
                len = le16_to_cpu(rh->len);

                /*
                 * Guard against incompletely written RX descriptors.
                 * Without this, things can get really slow!
                 */
                if (len == 0)
                        break;

                /* Discard CRC that is generated by the card */
                len -= 4;

                /* Check for invalid packets and errors */
                if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                        DBG("rx error len=%d flags=%04x\n", len,
                            cpu_to_le16(rh->flags));
                        bp->rx_iobuf[i] = NULL;
                        netdev_rx_err(bp->netdev, iob, -EINVAL);
                        continue;
                }

                /* Clear RX descriptor */
                bp->rx[i].ctrl = 0;
                bp->rx[i].addr = 0;
                bp->rx_iobuf[i] = NULL;

                /* Hand off the IO buffer to the network stack */
                iob_reserve(iob, RX_PKT_OFFSET);
                iob_put(iob, len);
                netdev_rx(bp->netdev, iob);
        }
        bp->rx_cur = i;

        b44_rx_refill(bp, pending_rx_index(bp));
}

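/*
 * Note: the zero-length guard above works because b44_populate_rx_descriptor()
 * clears the rx_header before a buffer is handed to the chip, so a zero
 * rh->len means the hardware has not finished writing that descriptor yet
 * and polling stops early.
 */
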
/** Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void b44_poll(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 istat;

        /* Interrupt status */
        istat = br32(bp, B44_ISTAT);
        istat &= IMASK_DEF;	/* only the events we care about */

        if (istat & ISTAT_TX)
                b44_tx_complete(bp);

        if (istat & ISTAT_RX)
                b44_process_rx_packets(bp);

        if (istat & ISTAT_ERRORS) {
                DBG("b44 error istat=0x%08x\n", istat);

                /* Reset B44 core partially to avoid long waits */
                b44_irq(bp->netdev, 0);
                b44_halt(bp);
                b44_init_tx_ring(bp);
                b44_init_rx_ring(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        }

        /* Acknowledge interrupt */
        bw32(bp, B44_ISTAT, 0);
        bflush(bp, B44_ISTAT, 1);
}

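/*
 * Note: iPXE drives the NIC by polling, so b44_poll() is called repeatedly
 * by the network stack; it reads ISTAT, services TX/RX work, performs a
 * partial recovery on ISTAT_ERRORS and then acknowledges the events by
 * writing ISTAT back to zero.
 */
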
static struct net_device_operations b44_operations = {
        .open = b44_open,
        .close = b44_close,
        .transmit = b44_transmit,
        .poll = b44_poll,
        .irq = b44_irq,
};

static struct pci_device_id b44_nics[] = {
        PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
        PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
        PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};

struct pci_driver b44_driver __pci_driver = {
        .ids = b44_nics,
        .id_count = sizeof b44_nics / sizeof b44_nics[0],
        .probe = b44_probe,
        .remove = b44_remove,
};