/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 *
 * Distribute under GPL.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
#include <linux/kernel.h>
16
#include <linux/module.h>
17
#include <linux/moduleparam.h>
18
#include <linux/types.h>
19
#include <linux/netdevice.h>
20
#include <linux/ethtool.h>
21
#include <linux/mii.h>
22
#include <linux/if_ether.h>
23
#include <linux/if_vlan.h>
24
#include <linux/etherdevice.h>
25
#include <linux/pci.h>
26
#include <linux/delay.h>
27
#include <linux/init.h>
28
#include <linux/interrupt.h>
29
#include <linux/dma-mapping.h>
30
#include <linux/ssb/ssb.h>
31
#include <linux/slab.h>
33
#include <asm/uaccess.h>
40
#define DRV_MODULE_NAME "b44"
41
#define DRV_MODULE_VERSION "2.0"
42
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
44
#define B44_DEF_MSG_ENABLE \
54
/* length of time before we decide the hardware is borked,
55
* and dev->tx_timeout() should be called to fix the problem
57
#define B44_TX_TIMEOUT (5 * HZ)
59
/* hardware minimum and maximum for a single frame's data payload */
60
#define B44_MIN_MTU 60
61
#define B44_MAX_MTU 1500
63
#define B44_RX_RING_SIZE 512
64
#define B44_DEF_RX_RING_PENDING 200
65
#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
67
#define B44_TX_RING_SIZE 512
68
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
69
#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
72
#define TX_RING_GAP(BP) \
73
(B44_TX_RING_SIZE - (BP)->tx_pending)
74
#define TX_BUFFS_AVAIL(BP) \
75
(((BP)->tx_cons <= (BP)->tx_prod) ? \
76
(BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
77
(BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
78
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
80
#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
81
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
83
/* minimum number of free TX descriptors required to wake up TX process */
84
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
86
/* b44 internal pattern match filter info */
87
#define B44_PATTERN_BASE 0x400
88
#define B44_PATTERN_SIZE 0x80
89
#define B44_PMASK_BASE 0x600
90
#define B44_PMASK_SIZE 0x10
91
#define B44_MAX_PATTERNS 16
92
#define B44_ETHIPV6UDP_HLEN 62
93
#define B44_ETHIPV4UDP_HLEN 42
95
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
96
MODULE_DESCRIPTION(DRV_DESCRIPTION);
97
MODULE_LICENSE("GPL");
98
MODULE_VERSION(DRV_MODULE_VERSION);
100
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101
module_param(b44_debug, int, 0);
102
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
105
#ifdef CONFIG_B44_PCI
106
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
107
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
108
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
109
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
110
{ 0 } /* terminate list with empty entry */
112
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
114
static struct pci_driver b44_pci_driver = {
115
.name = DRV_MODULE_NAME,
116
.id_table = b44_pci_tbl,
118
#endif /* CONFIG_B44_PCI */
120
static const struct ssb_device_id b44_ssb_tbl[] = {
121
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
124
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
126
static void b44_halt(struct b44 *);
127
static void b44_init_rings(struct b44 *);
129
#define B44_FULL_RESET 1
130
#define B44_FULL_RESET_SKIP_PHY 2
131
#define B44_PARTIAL_RESET 3
132
#define B44_CHIP_RESET_FULL 4
133
#define B44_CHIP_RESET_PARTIAL 5
135
static void b44_init_hw(struct b44 *, int);
137
static int dma_desc_sync_size;
140
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141
#define _B44(x...) # x,
146
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
148
unsigned long offset,
149
enum dma_data_direction dir)
151
dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
152
dma_desc_sync_size, dir);
155
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
157
unsigned long offset,
158
enum dma_data_direction dir)
160
dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
161
dma_desc_sync_size, dir);
164
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
166
return ssb_read32(bp->sdev, reg);
169
static inline void bw32(const struct b44 *bp,
170
unsigned long reg, unsigned long val)
172
ssb_write32(bp->sdev, reg, val);
175
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
176
u32 bit, unsigned long timeout, const int clear)
180
for (i = 0; i < timeout; i++) {
181
u32 val = br32(bp, reg);
183
if (clear && !(val & bit))
185
if (!clear && (val & bit))
191
netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192
bit, reg, clear ? "clear" : "set");
199
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
203
bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
204
(index << CAM_CTRL_INDEX_SHIFT)));
206
b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
208
val = br32(bp, B44_CAM_DATA_LO);
210
data[2] = (val >> 24) & 0xFF;
211
data[3] = (val >> 16) & 0xFF;
212
data[4] = (val >> 8) & 0xFF;
213
data[5] = (val >> 0) & 0xFF;
215
val = br32(bp, B44_CAM_DATA_HI);
217
data[0] = (val >> 8) & 0xFF;
218
data[1] = (val >> 0) & 0xFF;
221
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
225
val = ((u32) data[2]) << 24;
226
val |= ((u32) data[3]) << 16;
227
val |= ((u32) data[4]) << 8;
228
val |= ((u32) data[5]) << 0;
229
bw32(bp, B44_CAM_DATA_LO, val);
230
val = (CAM_DATA_HI_VALID |
231
(((u32) data[0]) << 8) |
232
(((u32) data[1]) << 0));
233
bw32(bp, B44_CAM_DATA_HI, val);
234
bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
235
(index << CAM_CTRL_INDEX_SHIFT)));
236
b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
239
static inline void __b44_disable_ints(struct b44 *bp)
241
bw32(bp, B44_IMASK, 0);
244
static void b44_disable_ints(struct b44 *bp)
246
__b44_disable_ints(bp);
248
/* Flush posted writes. */
252
static void b44_enable_ints(struct b44 *bp)
254
bw32(bp, B44_IMASK, bp->imask);
257
/* Raw MII read of @reg on PHY @phy_addr.  Kicks an MDIO read frame and
 * waits for the EMAC_INT_MII completion bit.  *val receives the 16-bit
 * data field; returns 0 or a negative error from b44_wait_bit(). */
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
273
/* Raw MII write of @val to @reg on PHY @phy_addr.  Returns 0 or a
 * negative error if the MDIO completion bit never sets. */
static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
285
/* Read @reg on the device's own PHY.  A phy-less board
 * (B44_PHY_ADDR_NO_PHY) succeeds trivially with 0. */
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}
293
/* Write @val to @reg on the device's own PHY; no-op success on
 * phy-less boards (B44_PHY_ADDR_NO_PHY). */
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}
301
/* miilib interface */
302
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
305
struct b44 *bp = netdev_priv(dev);
306
int rc = __b44_readphy(bp, phy_id, location, &val);
312
/* miilib callback: write a PHY register (errors are ignored, as the
 * miilib interface is void). */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);

	__b44_writephy(bp, phy_id, location, val);
}
319
static int b44_phy_reset(struct b44 *bp)
324
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
326
err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
330
err = b44_readphy(bp, MII_BMCR, &val);
332
if (val & BMCR_RESET) {
333
netdev_err(bp->dev, "PHY Reset would not complete\n");
341
/* Apply the pause flags in @pause_flags to both the software state
 * (bp->flags) and the RX/TX flow-control hardware registers.
 * Caller holds bp->lock. */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
364
/* Derive the pause configuration from the autoneg advertisement
 * registers (@local = MII_ADVERTISE, @remote = MII_LPA) and apply it. */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)){
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
383
#ifdef CONFIG_BCM47XX
384
#include <asm/mach-bcm47xx/nvram.h>
385
static void b44_wap54g10_workaround(struct b44 *bp)
392
* workaround for bad hardware design in Linksys WAP54G v1.0
393
* see https://dev.openwrt.org/ticket/146
394
* check and reset bit "isolate"
396
if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
398
if (simple_strtoul(buf, NULL, 0) == 2) {
399
err = __b44_readphy(bp, 0, MII_BMCR, &val);
402
if (!(val & BMCR_ISOLATE))
404
val &= ~BMCR_ISOLATE;
405
err = __b44_writephy(bp, 0, MII_BMCR, val);
411
pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
414
static inline void b44_wap54g10_workaround(struct b44 *bp)
419
static int b44_setup_phy(struct b44 *bp)
424
b44_wap54g10_workaround(bp);
426
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
428
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
430
if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
431
val & MII_ALEDCTRL_ALLMSK)) != 0)
433
if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
435
if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
436
val | MII_TLEDCTRL_ENABLE)) != 0)
439
if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
440
u32 adv = ADVERTISE_CSMA;
442
if (bp->flags & B44_FLAG_ADV_10HALF)
443
adv |= ADVERTISE_10HALF;
444
if (bp->flags & B44_FLAG_ADV_10FULL)
445
adv |= ADVERTISE_10FULL;
446
if (bp->flags & B44_FLAG_ADV_100HALF)
447
adv |= ADVERTISE_100HALF;
448
if (bp->flags & B44_FLAG_ADV_100FULL)
449
adv |= ADVERTISE_100FULL;
451
if (bp->flags & B44_FLAG_PAUSE_AUTO)
452
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
454
if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
456
if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
457
BMCR_ANRESTART))) != 0)
462
if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
464
bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
465
if (bp->flags & B44_FLAG_100_BASE_T)
466
bmcr |= BMCR_SPEED100;
467
if (bp->flags & B44_FLAG_FULL_DUPLEX)
468
bmcr |= BMCR_FULLDPLX;
469
if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
472
/* Since we will not be negotiating there is no safe way
473
* to determine if the link partner supports flow control
474
* or not. So just disable it completely in this case.
476
b44_set_flow_ctrl(bp, 0, 0);
483
static void b44_stats_update(struct b44 *bp)
488
val = &bp->hw_stats.tx_good_octets;
489
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
490
*val++ += br32(bp, reg);
496
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
497
*val++ += br32(bp, reg);
501
static void b44_link_report(struct b44 *bp)
503
if (!netif_carrier_ok(bp->dev)) {
504
netdev_info(bp->dev, "Link is down\n");
506
netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
507
(bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
508
(bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
510
netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
511
(bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
512
(bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
516
static void b44_check_phy(struct b44 *bp)
520
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
521
bp->flags |= B44_FLAG_100_BASE_T;
522
bp->flags |= B44_FLAG_FULL_DUPLEX;
523
if (!netif_carrier_ok(bp->dev)) {
524
u32 val = br32(bp, B44_TX_CTRL);
525
val |= TX_CTRL_DUPLEX;
526
bw32(bp, B44_TX_CTRL, val);
527
netif_carrier_on(bp->dev);
533
if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
534
!b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
536
if (aux & MII_AUXCTRL_SPEED)
537
bp->flags |= B44_FLAG_100_BASE_T;
539
bp->flags &= ~B44_FLAG_100_BASE_T;
540
if (aux & MII_AUXCTRL_DUPLEX)
541
bp->flags |= B44_FLAG_FULL_DUPLEX;
543
bp->flags &= ~B44_FLAG_FULL_DUPLEX;
545
if (!netif_carrier_ok(bp->dev) &&
546
(bmsr & BMSR_LSTATUS)) {
547
u32 val = br32(bp, B44_TX_CTRL);
548
u32 local_adv, remote_adv;
550
if (bp->flags & B44_FLAG_FULL_DUPLEX)
551
val |= TX_CTRL_DUPLEX;
553
val &= ~TX_CTRL_DUPLEX;
554
bw32(bp, B44_TX_CTRL, val);
556
if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
557
!b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
558
!b44_readphy(bp, MII_LPA, &remote_adv))
559
b44_set_flow_ctrl(bp, local_adv, remote_adv);
562
netif_carrier_on(bp->dev);
564
} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
566
netif_carrier_off(bp->dev);
570
if (bmsr & BMSR_RFAULT)
571
netdev_warn(bp->dev, "Remote fault detected in PHY\n");
573
netdev_warn(bp->dev, "Jabber detected in PHY\n");
577
static void b44_timer(unsigned long __opaque)
579
struct b44 *bp = (struct b44 *) __opaque;
581
spin_lock_irq(&bp->lock);
585
b44_stats_update(bp);
587
spin_unlock_irq(&bp->lock);
589
mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
592
static void b44_tx(struct b44 *bp)
596
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
597
cur /= sizeof(struct dma_desc);
599
/* XXX needs updating when NETIF_F_SG is supported */
600
for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
601
struct ring_info *rp = &bp->tx_buffers[cons];
602
struct sk_buff *skb = rp->skb;
606
dma_unmap_single(bp->sdev->dma_dev,
611
dev_kfree_skb_irq(skb);
615
if (netif_queue_stopped(bp->dev) &&
616
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
617
netif_wake_queue(bp->dev);
619
bw32(bp, B44_GPTIMER, 0);
622
/* Works like this. This chip writes a 'struct rx_header" 30 bytes
623
* before the DMA address you give it. So we allocate 30 more bytes
624
* for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
625
* point the chip at 30 bytes past where the rx_header will go.
627
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
630
struct ring_info *src_map, *map;
631
struct rx_header *rh;
639
src_map = &bp->rx_buffers[src_idx];
640
dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
641
map = &bp->rx_buffers[dest_idx];
642
skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
646
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
650
/* Hardware bug work-around, the chip is unable to do PCI DMA
651
to/from anything above 1GB :-( */
652
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
653
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
655
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
656
dma_unmap_single(bp->sdev->dma_dev, mapping,
657
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
658
dev_kfree_skb_any(skb);
659
skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
662
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
665
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
666
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
667
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
668
dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
669
dev_kfree_skb_any(skb);
672
bp->force_copybreak = 1;
675
rh = (struct rx_header *) skb->data;
681
map->mapping = mapping;
686
ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
687
if (dest_idx == (B44_RX_RING_SIZE - 1))
688
ctrl |= DESC_CTRL_EOT;
690
dp = &bp->rx_ring[dest_idx];
691
dp->ctrl = cpu_to_le32(ctrl);
692
dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
694
if (bp->flags & B44_FLAG_RX_RING_HACK)
695
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
696
dest_idx * sizeof(*dp),
699
return RX_PKT_BUF_SZ;
702
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
704
struct dma_desc *src_desc, *dest_desc;
705
struct ring_info *src_map, *dest_map;
706
struct rx_header *rh;
710
dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
711
dest_desc = &bp->rx_ring[dest_idx];
712
dest_map = &bp->rx_buffers[dest_idx];
713
src_desc = &bp->rx_ring[src_idx];
714
src_map = &bp->rx_buffers[src_idx];
716
dest_map->skb = src_map->skb;
717
rh = (struct rx_header *) src_map->skb->data;
720
dest_map->mapping = src_map->mapping;
722
if (bp->flags & B44_FLAG_RX_RING_HACK)
723
b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
724
src_idx * sizeof(*src_desc),
727
ctrl = src_desc->ctrl;
728
if (dest_idx == (B44_RX_RING_SIZE - 1))
729
ctrl |= cpu_to_le32(DESC_CTRL_EOT);
731
ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
733
dest_desc->ctrl = ctrl;
734
dest_desc->addr = src_desc->addr;
738
if (bp->flags & B44_FLAG_RX_RING_HACK)
739
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
740
dest_idx * sizeof(*dest_desc),
743
dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
748
static int b44_rx(struct b44 *bp, int budget)
754
prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
755
prod /= sizeof(struct dma_desc);
758
while (cons != prod && budget > 0) {
759
struct ring_info *rp = &bp->rx_buffers[cons];
760
struct sk_buff *skb = rp->skb;
761
dma_addr_t map = rp->mapping;
762
struct rx_header *rh;
765
dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
768
rh = (struct rx_header *) skb->data;
769
len = le16_to_cpu(rh->len);
770
if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
771
(rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
773
b44_recycle_rx(bp, cons, bp->rx_prod);
775
bp->dev->stats.rx_dropped++;
785
len = le16_to_cpu(rh->len);
786
} while (len == 0 && i++ < 5);
794
if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
796
skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
799
dma_unmap_single(bp->sdev->dma_dev, map,
800
skb_size, DMA_FROM_DEVICE);
801
/* Leave out rx_header */
802
skb_put(skb, len + RX_PKT_OFFSET);
803
skb_pull(skb, RX_PKT_OFFSET);
805
struct sk_buff *copy_skb;
807
b44_recycle_rx(bp, cons, bp->rx_prod);
808
copy_skb = netdev_alloc_skb(bp->dev, len + 2);
809
if (copy_skb == NULL)
810
goto drop_it_no_recycle;
812
skb_reserve(copy_skb, 2);
813
skb_put(copy_skb, len);
814
/* DMA sync done above, copy just the actual packet */
815
skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
816
copy_skb->data, len);
819
skb_checksum_none_assert(skb);
820
skb->protocol = eth_type_trans(skb, bp->dev);
821
netif_receive_skb(skb);
825
bp->rx_prod = (bp->rx_prod + 1) &
826
(B44_RX_RING_SIZE - 1);
827
cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
831
bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
836
static int b44_poll(struct napi_struct *napi, int budget)
838
struct b44 *bp = container_of(napi, struct b44, napi);
842
spin_lock_irqsave(&bp->lock, flags);
844
if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
845
/* spin_lock(&bp->tx_lock); */
847
/* spin_unlock(&bp->tx_lock); */
849
if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
850
bp->istat &= ~ISTAT_RFO;
851
b44_disable_ints(bp);
852
ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
854
b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
855
netif_wake_queue(bp->dev);
858
spin_unlock_irqrestore(&bp->lock, flags);
861
if (bp->istat & ISTAT_RX)
862
work_done += b44_rx(bp, budget);
864
if (bp->istat & ISTAT_ERRORS) {
865
spin_lock_irqsave(&bp->lock, flags);
868
b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
869
netif_wake_queue(bp->dev);
870
spin_unlock_irqrestore(&bp->lock, flags);
874
if (work_done < budget) {
882
static irqreturn_t b44_interrupt(int irq, void *dev_id)
884
struct net_device *dev = dev_id;
885
struct b44 *bp = netdev_priv(dev);
889
spin_lock(&bp->lock);
891
istat = br32(bp, B44_ISTAT);
892
imask = br32(bp, B44_IMASK);
894
/* The interrupt mask register controls which interrupt bits
895
* will actually raise an interrupt to the CPU when set by hw/firmware,
896
* but doesn't mask off the bits.
902
if (unlikely(!netif_running(dev))) {
903
netdev_info(dev, "late interrupt\n");
907
if (napi_schedule_prep(&bp->napi)) {
908
/* NOTE: These writes are posted by the readback of
909
* the ISTAT register below.
912
__b44_disable_ints(bp);
913
__napi_schedule(&bp->napi);
917
bw32(bp, B44_ISTAT, istat);
920
spin_unlock(&bp->lock);
921
return IRQ_RETVAL(handled);
924
static void b44_tx_timeout(struct net_device *dev)
926
struct b44 *bp = netdev_priv(dev);
928
netdev_err(dev, "transmit timed out, resetting\n");
930
spin_lock_irq(&bp->lock);
934
b44_init_hw(bp, B44_FULL_RESET);
936
spin_unlock_irq(&bp->lock);
940
netif_wake_queue(dev);
943
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
945
struct b44 *bp = netdev_priv(dev);
946
int rc = NETDEV_TX_OK;
948
u32 len, entry, ctrl;
952
spin_lock_irqsave(&bp->lock, flags);
954
/* This is a hard error, log it. */
955
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
956
netif_stop_queue(dev);
957
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
961
mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
962
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
963
struct sk_buff *bounce_skb;
965
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
966
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
967
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
970
bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
974
mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
976
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
977
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
978
dma_unmap_single(bp->sdev->dma_dev, mapping,
980
dev_kfree_skb_any(bounce_skb);
984
skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
985
dev_kfree_skb_any(skb);
990
bp->tx_buffers[entry].skb = skb;
991
bp->tx_buffers[entry].mapping = mapping;
993
ctrl = (len & DESC_CTRL_LEN);
994
ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
995
if (entry == (B44_TX_RING_SIZE - 1))
996
ctrl |= DESC_CTRL_EOT;
998
bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
999
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1001
if (bp->flags & B44_FLAG_TX_RING_HACK)
1002
b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1003
entry * sizeof(bp->tx_ring[0]),
1006
entry = NEXT_TX(entry);
1008
bp->tx_prod = entry;
1012
bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1013
if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1014
bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1015
if (bp->flags & B44_FLAG_REORDER_BUG)
1016
br32(bp, B44_DMATX_PTR);
1018
if (TX_BUFFS_AVAIL(bp) < 1)
1019
netif_stop_queue(dev);
1022
spin_unlock_irqrestore(&bp->lock, flags);
1027
rc = NETDEV_TX_BUSY;
1031
static int b44_change_mtu(struct net_device *dev, int new_mtu)
1033
struct b44 *bp = netdev_priv(dev);
1035
if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1038
if (!netif_running(dev)) {
1039
/* We'll just catch it later when the
1046
spin_lock_irq(&bp->lock);
1050
b44_init_hw(bp, B44_FULL_RESET);
1051
spin_unlock_irq(&bp->lock);
1053
b44_enable_ints(bp);
1058
/* Free up pending packets in all rx/tx rings.
1060
* The chip has been shut down and the driver detached from
1061
* the networking, so no interrupts or new tx packets will
1062
* end up in the driver. bp->lock is not held and we are not
1063
* in an interrupt context and thus may sleep.
1065
static void b44_free_rings(struct b44 *bp)
1067
struct ring_info *rp;
1070
for (i = 0; i < B44_RX_RING_SIZE; i++) {
1071
rp = &bp->rx_buffers[i];
1073
if (rp->skb == NULL)
1075
dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1077
dev_kfree_skb_any(rp->skb);
1081
/* XXX needs changes once NETIF_F_SG is set... */
1082
for (i = 0; i < B44_TX_RING_SIZE; i++) {
1083
rp = &bp->tx_buffers[i];
1085
if (rp->skb == NULL)
1087
dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1089
dev_kfree_skb_any(rp->skb);
1094
/* Initialize tx/rx rings for packet processing.
1096
* The chip has been shut down and the driver detached from
1097
* the networking, so no interrupts or new tx packets will
1098
* end up in the driver.
1100
static void b44_init_rings(struct b44 *bp)
1106
memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1107
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1109
if (bp->flags & B44_FLAG_RX_RING_HACK)
1110
dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1111
DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1113
if (bp->flags & B44_FLAG_TX_RING_HACK)
1114
dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1115
DMA_TABLE_BYTES, DMA_TO_DEVICE);
1117
for (i = 0; i < bp->rx_pending; i++) {
1118
if (b44_alloc_rx_skb(bp, -1, i) < 0)
1124
* Must not be invoked with interrupt sources disabled and
1125
* the hardware shutdown down.
1127
static void b44_free_consistent(struct b44 *bp)
1129
kfree(bp->rx_buffers);
1130
bp->rx_buffers = NULL;
1131
kfree(bp->tx_buffers);
1132
bp->tx_buffers = NULL;
1134
if (bp->flags & B44_FLAG_RX_RING_HACK) {
1135
dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1136
DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1139
dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1140
bp->rx_ring, bp->rx_ring_dma);
1142
bp->flags &= ~B44_FLAG_RX_RING_HACK;
1145
if (bp->flags & B44_FLAG_TX_RING_HACK) {
1146
dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1147
DMA_TABLE_BYTES, DMA_TO_DEVICE);
1150
dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1151
bp->tx_ring, bp->tx_ring_dma);
1153
bp->flags &= ~B44_FLAG_TX_RING_HACK;
1158
* Must not be invoked with interrupt sources disabled and
1159
* the hardware shutdown down. Can sleep.
1161
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1165
size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1166
bp->rx_buffers = kzalloc(size, gfp);
1167
if (!bp->rx_buffers)
1170
size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1171
bp->tx_buffers = kzalloc(size, gfp);
1172
if (!bp->tx_buffers)
1175
size = DMA_TABLE_BYTES;
1176
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1177
&bp->rx_ring_dma, gfp);
1179
/* Allocation may have failed due to pci_alloc_consistent
1180
insisting on use of GFP_DMA, which is more restrictive
1181
than necessary... */
1182
struct dma_desc *rx_ring;
1183
dma_addr_t rx_ring_dma;
1185
rx_ring = kzalloc(size, gfp);
1189
rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1193
if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1194
rx_ring_dma + size > DMA_BIT_MASK(30)) {
1199
bp->rx_ring = rx_ring;
1200
bp->rx_ring_dma = rx_ring_dma;
1201
bp->flags |= B44_FLAG_RX_RING_HACK;
1204
bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1205
&bp->tx_ring_dma, gfp);
1207
/* Allocation may have failed due to ssb_dma_alloc_consistent
1208
insisting on use of GFP_DMA, which is more restrictive
1209
than necessary... */
1210
struct dma_desc *tx_ring;
1211
dma_addr_t tx_ring_dma;
1213
tx_ring = kzalloc(size, gfp);
1217
tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1221
if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1222
tx_ring_dma + size > DMA_BIT_MASK(30)) {
1227
bp->tx_ring = tx_ring;
1228
bp->tx_ring_dma = tx_ring_dma;
1229
bp->flags |= B44_FLAG_TX_RING_HACK;
1235
b44_free_consistent(bp);
1239
/* bp->lock is held. */
1240
static void b44_clear_stats(struct b44 *bp)
1244
bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1245
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1247
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1251
/* bp->lock is held. */
1252
static void b44_chip_reset(struct b44 *bp, int reset_kind)
1254
struct ssb_device *sdev = bp->sdev;
1257
was_enabled = ssb_device_is_enabled(bp->sdev);
1259
ssb_device_enable(bp->sdev, 0);
1260
ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1263
bw32(bp, B44_RCV_LAZY, 0);
1264
bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1265
b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1266
bw32(bp, B44_DMATX_CTRL, 0);
1267
bp->tx_prod = bp->tx_cons = 0;
1268
if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1269
b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1272
bw32(bp, B44_DMARX_CTRL, 0);
1273
bp->rx_prod = bp->rx_cons = 0;
1276
b44_clear_stats(bp);
1279
* Don't enable PHY if we are doing a partial reset
1280
* we are probably going to power down
1282
if (reset_kind == B44_CHIP_RESET_PARTIAL)
1285
switch (sdev->bus->bustype) {
1286
case SSB_BUSTYPE_SSB:
1287
bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1288
(DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1290
& MDIO_CTRL_MAXF_MASK)));
1292
case SSB_BUSTYPE_PCI:
1293
bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1294
(0x0d & MDIO_CTRL_MAXF_MASK)));
1296
case SSB_BUSTYPE_PCMCIA:
1297
case SSB_BUSTYPE_SDIO:
1298
WARN_ON(1); /* A device with this bus does not exist. */
1302
br32(bp, B44_MDIO_CTRL);
1304
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1305
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1306
br32(bp, B44_ENET_CTRL);
1307
bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1309
u32 val = br32(bp, B44_DEVCTRL);
1311
if (val & DEVCTRL_EPR) {
1312
bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1313
br32(bp, B44_DEVCTRL);
1316
bp->flags |= B44_FLAG_INTERNAL_PHY;
1320
/* bp->lock is held. */
1321
static void b44_halt(struct b44 *bp)
1323
b44_disable_ints(bp);
1326
/* power down PHY */
1327
netdev_info(bp->dev, "powering down PHY\n");
1328
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1329
/* now reset the chip, but without enabling the MAC&PHY
1330
* part of it. This has to be done _after_ we shut down the PHY */
1331
b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1334
/* bp->lock is held. */
1335
static void __b44_set_mac_addr(struct b44 *bp)
1337
bw32(bp, B44_CAM_CTRL, 0);
1338
if (!(bp->dev->flags & IFF_PROMISC)) {
1341
__b44_cam_write(bp, bp->dev->dev_addr, 0);
1342
val = br32(bp, B44_CAM_CTRL);
1343
bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1347
static int b44_set_mac_addr(struct net_device *dev, void *p)
1349
struct b44 *bp = netdev_priv(dev);
1350
struct sockaddr *addr = p;
1353
if (netif_running(dev))
1356
if (!is_valid_ether_addr(addr->sa_data))
1359
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1361
spin_lock_irq(&bp->lock);
1363
val = br32(bp, B44_RXCONFIG);
1364
if (!(val & RXCONFIG_CAM_ABSENT))
1365
__b44_set_mac_addr(bp);
1367
spin_unlock_irq(&bp->lock);
1372
/* Called at device open time to get the chip ready for
1373
* packet processing. Invoked with bp->lock held.
1375
static void __b44_set_rx_mode(struct net_device *);
1376
static void b44_init_hw(struct b44 *bp, int reset_kind)
1380
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1381
if (reset_kind == B44_FULL_RESET) {
1386
/* Enable CRC32, set proper LED modes and power on PHY */
1387
bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1388
bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1390
/* This sets the MAC address too. */
1391
__b44_set_rx_mode(bp->dev);
1393
/* MTU + eth header + possible VLAN tag + struct rx_header */
1394
bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1395
bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1397
bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1398
if (reset_kind == B44_PARTIAL_RESET) {
1399
bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1400
(RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1402
bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1403
bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1404
bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1405
(RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1406
bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1408
bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1409
bp->rx_prod = bp->rx_pending;
1411
bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1414
val = br32(bp, B44_ENET_CTRL);
1415
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1418
static int b44_open(struct net_device *dev)
1420
struct b44 *bp = netdev_priv(dev);
1423
err = b44_alloc_consistent(bp, GFP_KERNEL);
1427
napi_enable(&bp->napi);
1430
b44_init_hw(bp, B44_FULL_RESET);
1434
err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1435
if (unlikely(err < 0)) {
1436
napi_disable(&bp->napi);
1437
b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1439
b44_free_consistent(bp);
1443
init_timer(&bp->timer);
1444
bp->timer.expires = jiffies + HZ;
1445
bp->timer.data = (unsigned long) bp;
1446
bp->timer.function = b44_timer;
1447
add_timer(&bp->timer);
1449
b44_enable_ints(bp);
1450
netif_start_queue(dev);
1455
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	/* Mask the line, run the handler synchronously, unmask. */
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/* Copy a WOL pattern/mask table into the chip's filter RAM, one 32-bit
 * word at a time via the FILT_ADDR/FILT_DATA register pair.
 * Note: 'bytes' is expected to be a multiple of sizeof(u32).
 */
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}
/* Build a magic-packet pattern (6 x 0xFF sync bytes followed by the MAC
 * address repeated) at 'offset' within ppattern, setting the matching
 * bits in pmask.  Returns the length of the pattern minus one, which is
 * the encoding the WKUP_LEN register expects.
 */
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	/* Magic-packet sync: six 0xFF bytes, all significant. */
	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		/* Clamp the last repetition to the pattern buffer size. */
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}
/* Setup magic packet patterns in the b44 WOL
1506
* pattern matching filter.
1508
static void b44_setup_pseudo_magicp(struct b44 *bp)
1512
int plen0, plen1, plen2;
1514
u8 pwol_mask[B44_PMASK_SIZE];
1516
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1517
if (!pwol_pattern) {
1518
pr_err("Memory not available for WOL\n");
1522
/* Ipv4 magic packet pattern - pattern 0.*/
1523
memset(pwol_mask, 0, B44_PMASK_SIZE);
1524
plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1525
B44_ETHIPV4UDP_HLEN);
1527
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1528
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1530
/* Raw ethernet II magic packet pattern - pattern 1 */
1531
memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1532
memset(pwol_mask, 0, B44_PMASK_SIZE);
1533
plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1536
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1537
B44_PATTERN_BASE + B44_PATTERN_SIZE);
1538
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1539
B44_PMASK_BASE + B44_PMASK_SIZE);
1541
/* Ipv6 magic packet pattern - pattern 2 */
1542
memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1543
memset(pwol_mask, 0, B44_PMASK_SIZE);
1544
plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1545
B44_ETHIPV6UDP_HLEN);
1547
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1548
B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1549
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1550
B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1552
kfree(pwol_pattern);
1554
/* set these pattern's lengths: one less than each real length */
1555
val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1556
bw32(bp, B44_WKUP_LEN, val);
1558
/* enable wakeup pattern matching */
1559
val = br32(bp, B44_DEVCTRL);
1560
bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1564
#ifdef CONFIG_B44_PCI
/* Arm PCI-level wakeup (PME) for cores that sit behind a PCI host bridge;
 * pure SSB buses need no extra work and use the stub below.
 */
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
static void b44_setup_wol(struct b44 *bp)
1583
bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1585
if (bp->flags & B44_FLAG_B0_ANDLATER) {
1587
bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1589
val = bp->dev->dev_addr[2] << 24 |
1590
bp->dev->dev_addr[3] << 16 |
1591
bp->dev->dev_addr[4] << 8 |
1592
bp->dev->dev_addr[5];
1593
bw32(bp, B44_ADDR_LO, val);
1595
val = bp->dev->dev_addr[0] << 8 |
1596
bp->dev->dev_addr[1];
1597
bw32(bp, B44_ADDR_HI, val);
1599
val = br32(bp, B44_DEVCTRL);
1600
bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1603
b44_setup_pseudo_magicp(bp);
1605
b44_setup_wol_pci(bp);
1608
static int b44_close(struct net_device *dev)
1610
struct b44 *bp = netdev_priv(dev);
1612
netif_stop_queue(dev);
1614
napi_disable(&bp->napi);
1616
del_timer_sync(&bp->timer);
1618
spin_lock_irq(&bp->lock);
1622
netif_carrier_off(dev);
1624
spin_unlock_irq(&bp->lock);
1626
free_irq(dev->irq, dev);
1628
if (bp->flags & B44_FLAG_WOL_ENABLE) {
1629
b44_init_hw(bp, B44_PARTIAL_RESET);
1633
b44_free_consistent(bp);
1638
static struct net_device_stats *b44_get_stats(struct net_device *dev)
1640
struct b44 *bp = netdev_priv(dev);
1641
struct net_device_stats *nstat = &dev->stats;
1642
struct b44_hw_stats *hwstat = &bp->hw_stats;
1644
/* Convert HW stats into netdevice stats. */
1645
nstat->rx_packets = hwstat->rx_pkts;
1646
nstat->tx_packets = hwstat->tx_pkts;
1647
nstat->rx_bytes = hwstat->rx_octets;
1648
nstat->tx_bytes = hwstat->tx_octets;
1649
nstat->tx_errors = (hwstat->tx_jabber_pkts +
1650
hwstat->tx_oversize_pkts +
1651
hwstat->tx_underruns +
1652
hwstat->tx_excessive_cols +
1653
hwstat->tx_late_cols);
1654
nstat->multicast = hwstat->tx_multicast_pkts;
1655
nstat->collisions = hwstat->tx_total_cols;
1657
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1658
hwstat->rx_undersize);
1659
nstat->rx_over_errors = hwstat->rx_missed_pkts;
1660
nstat->rx_frame_errors = hwstat->rx_align_errs;
1661
nstat->rx_crc_errors = hwstat->rx_crc_errs;
1662
nstat->rx_errors = (hwstat->rx_jabber_pkts +
1663
hwstat->rx_oversize_pkts +
1664
hwstat->rx_missed_pkts +
1665
hwstat->rx_crc_align_errs +
1666
hwstat->rx_undersize +
1667
hwstat->rx_crc_errs +
1668
hwstat->rx_align_errs +
1669
hwstat->rx_symbol_errs);
1671
nstat->tx_aborted_errors = hwstat->tx_underruns;
1673
/* Carrier lost counter seems to be broken for some devices */
1674
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1680
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1682
struct netdev_hw_addr *ha;
1685
num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1687
netdev_for_each_mc_addr(ha, dev) {
1690
__b44_cam_write(bp, ha->addr, i++ + 1);
1695
static void __b44_set_rx_mode(struct net_device *dev)
1697
struct b44 *bp = netdev_priv(dev);
1700
val = br32(bp, B44_RXCONFIG);
1701
val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1702
if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1703
val |= RXCONFIG_PROMISC;
1704
bw32(bp, B44_RXCONFIG, val);
1706
unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1709
__b44_set_mac_addr(bp);
1711
if ((dev->flags & IFF_ALLMULTI) ||
1712
(netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1713
val |= RXCONFIG_ALLMULTI;
1715
i = __b44_load_mcast(bp, dev);
1718
__b44_cam_write(bp, zero, i);
1720
bw32(bp, B44_RXCONFIG, val);
1721
val = br32(bp, B44_CAM_CTRL);
1722
bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1726
static void b44_set_rx_mode(struct net_device *dev)
1728
struct b44 *bp = netdev_priv(dev);
1730
spin_lock_irq(&bp->lock);
1731
__b44_set_rx_mode(dev);
1732
spin_unlock_irq(&bp->lock);
1735
static u32 b44_get_msglevel(struct net_device *dev)
1737
struct b44 *bp = netdev_priv(dev);
1738
return bp->msg_enable;
1741
/* ethtool: set the NETIF_MSG_* debug mask. */
static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1749
struct b44 *bp = netdev_priv(dev);
1750
struct ssb_bus *bus = bp->sdev->bus;
1752
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1753
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1754
switch (bus->bustype) {
1755
case SSB_BUSTYPE_PCI:
1756
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1758
case SSB_BUSTYPE_SSB:
1759
strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1761
case SSB_BUSTYPE_PCMCIA:
1762
case SSB_BUSTYPE_SDIO:
1763
WARN_ON(1); /* A device with this bus does not exist. */
1768
static int b44_nway_reset(struct net_device *dev)
1770
struct b44 *bp = netdev_priv(dev);
1774
spin_lock_irq(&bp->lock);
1775
b44_readphy(bp, MII_BMCR, &bmcr);
1776
b44_readphy(bp, MII_BMCR, &bmcr);
1778
if (bmcr & BMCR_ANENABLE) {
1779
b44_writephy(bp, MII_BMCR,
1780
bmcr | BMCR_ANRESTART);
1783
spin_unlock_irq(&bp->lock);
1788
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1790
struct b44 *bp = netdev_priv(dev);
1792
cmd->supported = (SUPPORTED_Autoneg);
1793
cmd->supported |= (SUPPORTED_100baseT_Half |
1794
SUPPORTED_100baseT_Full |
1795
SUPPORTED_10baseT_Half |
1796
SUPPORTED_10baseT_Full |
1799
cmd->advertising = 0;
1800
if (bp->flags & B44_FLAG_ADV_10HALF)
1801
cmd->advertising |= ADVERTISED_10baseT_Half;
1802
if (bp->flags & B44_FLAG_ADV_10FULL)
1803
cmd->advertising |= ADVERTISED_10baseT_Full;
1804
if (bp->flags & B44_FLAG_ADV_100HALF)
1805
cmd->advertising |= ADVERTISED_100baseT_Half;
1806
if (bp->flags & B44_FLAG_ADV_100FULL)
1807
cmd->advertising |= ADVERTISED_100baseT_Full;
1808
cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1809
ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1810
SPEED_100 : SPEED_10));
1811
cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1812
DUPLEX_FULL : DUPLEX_HALF;
1814
cmd->phy_address = bp->phy_addr;
1815
cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1816
XCVR_INTERNAL : XCVR_EXTERNAL;
1817
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1818
AUTONEG_DISABLE : AUTONEG_ENABLE;
1819
if (cmd->autoneg == AUTONEG_ENABLE)
1820
cmd->advertising |= ADVERTISED_Autoneg;
1821
if (!netif_running(dev)){
1822
ethtool_cmd_speed_set(cmd, 0);
1830
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1832
struct b44 *bp = netdev_priv(dev);
1833
u32 speed = ethtool_cmd_speed(cmd);
1835
/* We do not support gigabit. */
1836
if (cmd->autoneg == AUTONEG_ENABLE) {
1837
if (cmd->advertising &
1838
(ADVERTISED_1000baseT_Half |
1839
ADVERTISED_1000baseT_Full))
1841
} else if ((speed != SPEED_100 &&
1842
speed != SPEED_10) ||
1843
(cmd->duplex != DUPLEX_HALF &&
1844
cmd->duplex != DUPLEX_FULL)) {
1848
spin_lock_irq(&bp->lock);
1850
if (cmd->autoneg == AUTONEG_ENABLE) {
1851
bp->flags &= ~(B44_FLAG_FORCE_LINK |
1852
B44_FLAG_100_BASE_T |
1853
B44_FLAG_FULL_DUPLEX |
1854
B44_FLAG_ADV_10HALF |
1855
B44_FLAG_ADV_10FULL |
1856
B44_FLAG_ADV_100HALF |
1857
B44_FLAG_ADV_100FULL);
1858
if (cmd->advertising == 0) {
1859
bp->flags |= (B44_FLAG_ADV_10HALF |
1860
B44_FLAG_ADV_10FULL |
1861
B44_FLAG_ADV_100HALF |
1862
B44_FLAG_ADV_100FULL);
1864
if (cmd->advertising & ADVERTISED_10baseT_Half)
1865
bp->flags |= B44_FLAG_ADV_10HALF;
1866
if (cmd->advertising & ADVERTISED_10baseT_Full)
1867
bp->flags |= B44_FLAG_ADV_10FULL;
1868
if (cmd->advertising & ADVERTISED_100baseT_Half)
1869
bp->flags |= B44_FLAG_ADV_100HALF;
1870
if (cmd->advertising & ADVERTISED_100baseT_Full)
1871
bp->flags |= B44_FLAG_ADV_100FULL;
1874
bp->flags |= B44_FLAG_FORCE_LINK;
1875
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1876
if (speed == SPEED_100)
1877
bp->flags |= B44_FLAG_100_BASE_T;
1878
if (cmd->duplex == DUPLEX_FULL)
1879
bp->flags |= B44_FLAG_FULL_DUPLEX;
1882
if (netif_running(dev))
1885
spin_unlock_irq(&bp->lock);
1890
static void b44_get_ringparam(struct net_device *dev,
1891
struct ethtool_ringparam *ering)
1893
struct b44 *bp = netdev_priv(dev);
1895
ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1896
ering->rx_pending = bp->rx_pending;
1898
/* XXX ethtool lacks a tx_max_pending, oops... */
1901
static int b44_set_ringparam(struct net_device *dev,
1902
struct ethtool_ringparam *ering)
1904
struct b44 *bp = netdev_priv(dev);
1906
if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1907
(ering->rx_mini_pending != 0) ||
1908
(ering->rx_jumbo_pending != 0) ||
1909
(ering->tx_pending > B44_TX_RING_SIZE - 1))
1912
spin_lock_irq(&bp->lock);
1914
bp->rx_pending = ering->rx_pending;
1915
bp->tx_pending = ering->tx_pending;
1919
b44_init_hw(bp, B44_FULL_RESET);
1920
netif_wake_queue(bp->dev);
1921
spin_unlock_irq(&bp->lock);
1923
b44_enable_ints(bp);
1928
static void b44_get_pauseparam(struct net_device *dev,
1929
struct ethtool_pauseparam *epause)
1931
struct b44 *bp = netdev_priv(dev);
1934
(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1936
(bp->flags & B44_FLAG_RX_PAUSE) != 0;
1938
(bp->flags & B44_FLAG_TX_PAUSE) != 0;
1941
static int b44_set_pauseparam(struct net_device *dev,
1942
struct ethtool_pauseparam *epause)
1944
struct b44 *bp = netdev_priv(dev);
1946
spin_lock_irq(&bp->lock);
1947
if (epause->autoneg)
1948
bp->flags |= B44_FLAG_PAUSE_AUTO;
1950
bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1951
if (epause->rx_pause)
1952
bp->flags |= B44_FLAG_RX_PAUSE;
1954
bp->flags &= ~B44_FLAG_RX_PAUSE;
1955
if (epause->tx_pause)
1956
bp->flags |= B44_FLAG_TX_PAUSE;
1958
bp->flags &= ~B44_FLAG_TX_PAUSE;
1959
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1962
b44_init_hw(bp, B44_FULL_RESET);
1964
__b44_set_flow_ctrl(bp, bp->flags);
1966
spin_unlock_irq(&bp->lock);
1968
b44_enable_ints(bp);
1973
/* ethtool: copy out the statistics name table for ETH_SS_STATS. */
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}
static int b44_get_sset_count(struct net_device *dev, int sset)
1986
return ARRAY_SIZE(b44_gstrings);
1992
/* ethtool: snapshot the hardware counters.  Relies on struct b44_hw_stats
 * being a contiguous array of u32 starting at tx_good_octets, in the same
 * order as b44_gstrings — confirm if the struct layout ever changes.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2011
struct b44 *bp = netdev_priv(dev);
2013
wol->supported = WAKE_MAGIC;
2014
if (bp->flags & B44_FLAG_WOL_ENABLE)
2015
wol->wolopts = WAKE_MAGIC;
2018
memset(&wol->sopass, 0, sizeof(wol->sopass));
2021
static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2023
struct b44 *bp = netdev_priv(dev);
2025
spin_lock_irq(&bp->lock);
2026
if (wol->wolopts & WAKE_MAGIC)
2027
bp->flags |= B44_FLAG_WOL_ENABLE;
2029
bp->flags &= ~B44_FLAG_WOL_ENABLE;
2030
spin_unlock_irq(&bp->lock);
2035
static const struct ethtool_ops b44_ethtool_ops = {
2036
.get_drvinfo = b44_get_drvinfo,
2037
.get_settings = b44_get_settings,
2038
.set_settings = b44_set_settings,
2039
.nway_reset = b44_nway_reset,
2040
.get_link = ethtool_op_get_link,
2041
.get_wol = b44_get_wol,
2042
.set_wol = b44_set_wol,
2043
.get_ringparam = b44_get_ringparam,
2044
.set_ringparam = b44_set_ringparam,
2045
.get_pauseparam = b44_get_pauseparam,
2046
.set_pauseparam = b44_set_pauseparam,
2047
.get_msglevel = b44_get_msglevel,
2048
.set_msglevel = b44_set_msglevel,
2049
.get_strings = b44_get_strings,
2050
.get_sset_count = b44_get_sset_count,
2051
.get_ethtool_stats = b44_get_ethtool_stats,
2054
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2056
struct mii_ioctl_data *data = if_mii(ifr);
2057
struct b44 *bp = netdev_priv(dev);
2060
if (!netif_running(dev))
2063
spin_lock_irq(&bp->lock);
2064
err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2065
spin_unlock_irq(&bp->lock);
2070
/* Read per-device invariants from the SSB SPROM: MAC address, PHY address
 * and DMA translation offset.  The second ethernet core on an SSB bus
 * uses the et1* SPROM fields.  Returns 0 or -EINVAL for a bad MAC.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	/* Core revision 7+ corresponds to the B0 chip and later. */
	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
static const struct net_device_ops b44_netdev_ops = {
2113
.ndo_open = b44_open,
2114
.ndo_stop = b44_close,
2115
.ndo_start_xmit = b44_start_xmit,
2116
.ndo_get_stats = b44_get_stats,
2117
.ndo_set_rx_mode = b44_set_rx_mode,
2118
.ndo_set_mac_address = b44_set_mac_addr,
2119
.ndo_validate_addr = eth_validate_addr,
2120
.ndo_do_ioctl = b44_ioctl,
2121
.ndo_tx_timeout = b44_tx_timeout,
2122
.ndo_change_mtu = b44_change_mtu,
2123
#ifdef CONFIG_NET_POLL_CONTROLLER
2124
.ndo_poll_controller = b44_poll_controller,
2128
/* SSB probe: allocate and register the netdevice, power up the bus,
 * set the DMA mask, read invariants and reset the chip/PHY.  Error
 * paths unwind via the labels at the bottom.
 */
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		/* Make sure the probe fails with a nonzero errno. */
		err = -ENODEV;
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}
/* SSB remove: unregister the netdevice, power the core down and free
 * everything allocated in b44_init_one().
 */
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
/* SSB suspend: halt the MAC and detach the netdevice.  If WOL is armed,
 * partially re-init the chip so the wakeup logic stays powered, then
 * drop the device into D3hot.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
static int b44_resume(struct ssb_device *sdev)
2280
struct net_device *dev = ssb_get_drvdata(sdev);
2281
struct b44 *bp = netdev_priv(dev);
2284
rc = ssb_bus_powerup(sdev->bus, 0);
2287
"Failed to powerup the bus\n");
2291
if (!netif_running(dev))
2294
spin_lock_irq(&bp->lock);
2296
b44_init_hw(bp, B44_FULL_RESET);
2297
spin_unlock_irq(&bp->lock);
2300
* As a shared interrupt, the handler can be called immediately. To be
2301
* able to check the interrupt status the hardware must already be
2302
* powered back on (b44_init_hw).
2304
rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2306
netdev_err(dev, "request_irq failed\n");
2307
spin_lock_irq(&bp->lock);
2310
spin_unlock_irq(&bp->lock);
2314
netif_device_attach(bp->dev);
2316
b44_enable_ints(bp);
2317
netif_wake_queue(dev);
2319
mod_timer(&bp->timer, jiffies + 1);
2324
static struct ssb_driver b44_ssb_driver = {
2325
.name = DRV_MODULE_NAME,
2326
.id_table = b44_ssb_tbl,
2327
.probe = b44_init_one,
2328
.remove = __devexit_p(b44_remove_one),
2329
.suspend = b44_suspend,
2330
.resume = b44_resume,
2333
/* Register the PCI host glue when built with PCI support; no-op (and
 * always successful) otherwise.
 */
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}
/* Unregister the PCI host glue; no-op without PCI support. */
static inline void __exit b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
/* Module init: compute DMA descriptor sync parameters from the cache
 * line size, then register the PCI glue and the SSB driver.  The PCI
 * registration is rolled back if the SSB registration fails.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup paramaters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
/* Module exit: unregister the SSB driver and the PCI glue. */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}
module_init(b44_init);
module_exit(b44_cleanup);