/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/
MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
static void fs_set_multicast_list(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    (*fep->ops->set_multicast_list)(dev);
}
static void skb_align(struct sk_buff *skb, int align)
{
    int off = ((unsigned long)skb->data) & (align - 1);

    if (off)
        skb_reserve(skb, align - off);
}
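/*
 * Worked example (illustrative): assuming ENET_RX_ALIGN is 16, a buffer
 * whose data pointer ends in ...0x0a gives off = 10, so skb_reserve()
 * advances the pointer by 6 bytes to the next 16-byte boundary; an
 * already-aligned buffer gives off = 0 and is left untouched.
 */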
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
    struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
    struct net_device *dev = fep->ndev;
    const struct fs_platform_info *fpi = fep->fpi;
    cbd_t __iomem *bdp;
    struct sk_buff *skb, *skbn, *skbt;
    int received = 0;
    u16 pkt_len, sc;
    int curidx;

    /*
     * First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    /* clear RX status bits for napi */
    (*fep->ops->napi_clear_rx_event)(dev);

    while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
        curidx = bdp - fep->rx_bd_base;

        /*
         * Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((sc & BD_ENET_RX_LAST) == 0)
            dev_warn(fep->dev, "rcv is not +last\n");

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            fep->stats.rx_errors++;
            /* Frame too long or too short. */
            if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                fep->stats.rx_length_errors++;
            /* Frame alignment */
            if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                fep->stats.rx_frame_errors++;
            /* CRC Error */
            if (sc & BD_ENET_RX_CR)
                fep->stats.rx_crc_errors++;
            /* FIFO overrun */
            if (sc & BD_ENET_RX_OV)
                fep->stats.rx_crc_errors++;

            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                DMA_FROM_DEVICE);

            skbn = skb;

        } else {
            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                DMA_FROM_DEVICE);

            /*
             * Process the incoming frame.
             */
            fep->stats.rx_packets++;
            pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
            fep->stats.rx_bytes += pkt_len + 4;

            if (pkt_len <= fpi->rx_copybreak) {
                /* +2 to make IP header L1 cache aligned */
                skbn = dev_alloc_skb(pkt_len + 2);
                if (skbn != NULL) {
                    skb_reserve(skbn, 2); /* align IP header */
                    skb_copy_from_linear_data(skb,
                                  skbn->data, pkt_len);
                    /* swap */
                    skbt = skb;
                    skb = skbn;
                    skbn = skbt;
                }
            } else {
                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                if (skbn)
                    skb_align(skbn, ENET_RX_ALIGN);
            }

            if (skbn != NULL) {
                skb_put(skb, pkt_len); /* Make room */
                skb->protocol = eth_type_trans(skb, dev);
                received++;
                netif_receive_skb(skb);
            } else {
                dev_warn(fep->dev,
                         "Memory squeeze, dropping packet.\n");
                fep->stats.rx_dropped++;
                skbn = skb;
            }
        }

        fep->rx_skbuff[curidx] = skbn;
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                     DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

        /*
         * Update BD pointer to next entry.
         */
        if ((sc & BD_ENET_RX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->rx_bd_base;

        (*fep->ops->rx_bd_done)(dev);

        if (received >= budget)
            break;
    }

    fep->cur_rx = bdp;

    if (received < budget) {
        /* done */
        napi_complete(napi);
        (*fep->ops->napi_enable_rx)(dev);
    }
    return received;
}
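/*
 * Note: per the NAPI contract, napi_complete() is only called when the
 * budget was not exhausted; otherwise the poll function will be invoked
 * again, so RX events stay masked until the ring has been drained.
 */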
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    const struct fs_platform_info *fpi = fep->fpi;
    cbd_t __iomem *bdp;
    struct sk_buff *skb, *skbn, *skbt;
    int received = 0;
    u16 pkt_len, sc;
    int curidx;

    /*
     * First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
        curidx = bdp - fep->rx_bd_base;

        /*
         * Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((sc & BD_ENET_RX_LAST) == 0)
            dev_warn(fep->dev, "rcv is not +last\n");

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            fep->stats.rx_errors++;
            /* Frame too long or too short. */
            if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                fep->stats.rx_length_errors++;
            /* Frame alignment */
            if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                fep->stats.rx_frame_errors++;
            /* CRC Error */
            if (sc & BD_ENET_RX_CR)
                fep->stats.rx_crc_errors++;
            /* FIFO overrun */
            if (sc & BD_ENET_RX_OV)
                fep->stats.rx_crc_errors++;

            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                DMA_FROM_DEVICE);

            skbn = skb;

        } else {
            skb = fep->rx_skbuff[curidx];

            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                DMA_FROM_DEVICE);

            /*
             * Process the incoming frame.
             */
            fep->stats.rx_packets++;
            pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
            fep->stats.rx_bytes += pkt_len + 4;

            if (pkt_len <= fpi->rx_copybreak) {
                /* +2 to make IP header L1 cache aligned */
                skbn = dev_alloc_skb(pkt_len + 2);
                if (skbn != NULL) {
                    skb_reserve(skbn, 2); /* align IP header */
                    skb_copy_from_linear_data(skb,
                                  skbn->data, pkt_len);
                    /* swap */
                    skbt = skb;
                    skb = skbn;
                    skbn = skbt;
                }
            } else {
                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                if (skbn)
                    skb_align(skbn, ENET_RX_ALIGN);
            }

            if (skbn != NULL) {
                skb_put(skb, pkt_len); /* Make room */
                skb->protocol = eth_type_trans(skb, dev);
                received++;
                netif_rx(skb);
            } else {
                dev_warn(fep->dev,
                         "Memory squeeze, dropping packet.\n");
                fep->stats.rx_dropped++;
                skbn = skb;
            }
        }

        fep->rx_skbuff[curidx] = skbn;
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                     DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

        /*
         * Update BD pointer to next entry.
         */
        if ((sc & BD_ENET_RX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->rx_bd_base;

        (*fep->ops->rx_bd_done)(dev);
    }

    fep->cur_rx = bdp;

    return 0;
}
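/*
 * Note: both receive paths share the rx_copybreak heuristic: frames no
 * longer than fpi->rx_copybreak (240 bytes, as set in fs_enet_probe())
 * are copied into a small freshly allocated skb so the full-size ring
 * buffer can be handed straight back to the controller.
 */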
static void fs_enet_tx(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    struct sk_buff *skb;
    int dirtyidx, do_wake, do_restart;
    u16 sc;

    spin_lock(&fep->tx_lock);
    bdp = fep->dirty_tx;

    do_wake = do_restart = 0;
    while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
        dirtyidx = bdp - fep->tx_bd_base;

        if (fep->tx_free == fep->tx_ring)
            break;

        skb = fep->tx_skbuff[dirtyidx];

        /*
         * Check for errors.
         */
        if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

            if (sc & BD_ENET_TX_HB) /* No heartbeat */
                fep->stats.tx_heartbeat_errors++;
            if (sc & BD_ENET_TX_LC) /* Late collision */
                fep->stats.tx_window_errors++;
            if (sc & BD_ENET_TX_RL) /* Retrans limit */
                fep->stats.tx_aborted_errors++;
            if (sc & BD_ENET_TX_UN) /* Underrun */
                fep->stats.tx_fifo_errors++;
            if (sc & BD_ENET_TX_CSL) /* Carrier lost */
                fep->stats.tx_carrier_errors++;

            if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                fep->stats.tx_errors++;
                do_restart = 1;
            }
        } else
            fep->stats.tx_packets++;

        if (sc & BD_ENET_TX_READY) {
            dev_warn(fep->dev,
                     "HEY! Enet xmit interrupt and TX_READY.\n");
        }

        /*
         * Deferred means some collisions occurred during transmit,
         * but we eventually sent the packet OK.
         */
        if (sc & BD_ENET_TX_DEF)
            fep->stats.collisions++;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                skb->len, DMA_TO_DEVICE);

        /*
         * Free the sk buffer associated with this last transmit.
         */
        dev_kfree_skb_irq(skb);
        fep->tx_skbuff[dirtyidx] = NULL;

        /*
         * Update pointer to next buffer descriptor to be transmitted.
         */
        if ((sc & BD_ENET_TX_WRAP) == 0)
            bdp++;
        else
            bdp = fep->tx_bd_base;

        /*
         * Since we have freed up a buffer, the ring is no longer
         * full.
         */
        if (!fep->tx_free++)
            do_wake = 1;
    }

    fep->dirty_tx = bdp;

    if (do_restart)
        (*fep->ops->tx_restart)(dev);

    spin_unlock(&fep->tx_lock);

    if (do_wake)
        netif_wake_queue(dev);
}
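/*
 * Note: fs_enet_tx() runs in hard interrupt context under fep->tx_lock,
 * which is why completed buffers are released with dev_kfree_skb_irq()
 * rather than plain dev_kfree_skb().
 */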
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct fs_enet_private *fep;
    const struct fs_platform_info *fpi;
    u32 int_events;
    u32 int_clr_events;
    int nr, napi_ok;
    int handled;

    fep = netdev_priv(dev);
    fpi = fep->fpi;

    nr = 0;
    while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
        nr++;

        int_clr_events = int_events;
        if (fpi->use_napi)
            int_clr_events &= ~fep->ev_napi_rx;

        (*fep->ops->clear_int_events)(dev, int_clr_events);

        if (int_events & fep->ev_err)
            (*fep->ops->ev_error)(dev, int_events);

        if (int_events & fep->ev_rx) {
            if (!fpi->use_napi)
                fs_enet_rx_non_napi(dev);
            else {
                napi_ok = napi_schedule_prep(&fep->napi);

                (*fep->ops->napi_disable_rx)(dev);
                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                /* NOTE: it is possible for FCCs in NAPI mode */
                /* to submit a spurious interrupt while in poll */
                if (napi_ok)
                    __napi_schedule(&fep->napi);
            }
        }

        if (int_events & fep->ev_tx)
            fs_enet_tx(dev);
    }

    handled = nr > 0;
    return IRQ_RETVAL(handled);
}
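/*
 * The loop above keeps polling get_int_events() until no sources remain
 * pending, and IRQ_HANDLED is reported only if at least one iteration
 * did work; this is what lets the IRQF_SHARED registration made in
 * fs_enet_open() coexist with other devices on the same line.
 */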
void fs_init_bds(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    struct sk_buff *skb;
    int i;

    fs_cleanup_bds(dev);

    fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
    fep->tx_free = fep->tx_ring;
    fep->cur_rx = fep->rx_bd_base;

    /*
     * Initialize the receive buffer descriptors.
     */
    for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
        skb = dev_alloc_skb(ENET_RX_FRSIZE);
        if (skb == NULL) {
            dev_warn(fep->dev,
                     "Memory squeeze, unable to allocate skb\n");
            break;
        }
        skb_align(skb, ENET_RX_ALIGN);
        fep->rx_skbuff[i] = skb;
        CBDW_BUFADDR(bdp,
            dma_map_single(fep->dev, skb->data,
                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                DMA_FROM_DEVICE));
        CBDW_DATLEN(bdp, 0); /* zero */
        CBDW_SC(bdp, BD_ENET_RX_EMPTY |
            ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
    }
    /*
     * if we failed, fillup remainder
     */
    for (; i < fep->rx_ring; i++, bdp++) {
        fep->rx_skbuff[i] = NULL;
        CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
    }

    /*
     * ...and the same for transmit.
     */
    for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
        fep->tx_skbuff[i] = NULL;
        CBDW_BUFADDR(bdp, 0);
        CBDW_DATLEN(bdp, 0);
        CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
    }
}
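/*
 * Illustration: for a 4-entry ring the status words get wrap bits of
 * { 0, 0, 0, BD_SC_WRAP }, so the controller advances through entries
 * 0..3 and then wraps back to entry 0 on its own.
 */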
void fs_cleanup_bds(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct sk_buff *skb;
    cbd_t __iomem *bdp;
    int i;

    /*
     * Reset SKB transmit buffers.
     */
    for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
        if ((skb = fep->tx_skbuff[i]) == NULL)
            continue;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                skb->len, DMA_TO_DEVICE);

        fep->tx_skbuff[i] = NULL;
        dev_kfree_skb(skb);
    }

    /*
     * Reset SKB receive buffers
     */
    for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
        if ((skb = fep->rx_skbuff[i]) == NULL)
            continue;

        /* unmap */
        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
            L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
            DMA_FROM_DEVICE);

        fep->rx_skbuff[i] = NULL;

        dev_kfree_skb(skb);
    }
}
/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
                                               struct sk_buff *skb)
{
    struct sk_buff *new_skb;
    struct fs_enet_private *fep = netdev_priv(dev);

    /* Alloc new skb */
    new_skb = dev_alloc_skb(skb->len + 4);
    if (!new_skb) {
        if (net_ratelimit()) {
            dev_warn(fep->dev,
                     "Memory squeeze, dropping tx packet.\n");
        }
        return NULL;
    }

    /* Make sure new skb is properly aligned */
    skb_align(new_skb, 4);

    /* Copy data to new skb ... */
    skb_copy_from_linear_data(skb, new_skb->data, skb->len);
    skb_put(new_skb, skb->len);

    /* ... and free an old one */
    dev_kfree_skb_any(skb);

    return new_skb;
}
#endif
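/*
 * Note: the helper above is only called from fs_enet_start_xmit() when
 * skb->data is not 4-byte aligned; on success the caller must continue
 * with the returned skb, since the original one has already been freed.
 */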
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    cbd_t __iomem *bdp;
    int curidx;
    u16 sc;
    unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
    if (((unsigned long)skb->data) & 0x3) {
        skb = tx_skb_align_workaround(dev, skb);
        if (!skb) {
            /*
             * We have lost packet due to memory allocation error
             * in tx_skb_align_workaround(). Hopefully original
             * skb is still valid, so try transmit it later.
             */
            return NETDEV_TX_BUSY;
        }
    }
#endif
    spin_lock_irqsave(&fep->tx_lock, flags);

    /*
     * Fill in a Tx ring entry
     */
    bdp = fep->cur_tx;

    if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&fep->tx_lock, flags);

        /*
         * Ooops. All transmit buffers are full. Bail out.
         * This should not happen, since the tx queue should be stopped.
         */
        dev_warn(fep->dev, "tx queue full!\n");
        return NETDEV_TX_BUSY;
    }

    curidx = bdp - fep->tx_bd_base;
    /*
     * Clear all of the status flags.
     */
    CBDC_SC(bdp, BD_ENET_TX_STATS);

    /*
     * Save skb pointer.
     */
    fep->tx_skbuff[curidx] = skb;

    fep->stats.tx_bytes += skb->len;

    /*
     * Push the data cache so the CPM does not get stale memory data.
     */
    CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                skb->data, skb->len, DMA_TO_DEVICE));
    CBDW_DATLEN(bdp, skb->len);

    /*
     * If this was the last BD in the ring, start at the beginning again.
     */
    if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
        fep->cur_tx++;
    else
        fep->cur_tx = fep->tx_bd_base;

    if (!--fep->tx_free)
        netif_stop_queue(dev);

    /* Trigger transmission start */
    sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
         BD_ENET_TX_LAST | BD_ENET_TX_TC;

    /* note that while FEC does not have this bit
     * it marks it as available for software use
     * yay for hw reuse :) */
    if (skb->len <= 60)
        sc |= BD_ENET_TX_PAD;
    CBDW_SC(bdp, CBDR_SC(bdp) | sc);

    (*fep->ops->tx_kickstart)(dev);

    spin_unlock_irqrestore(&fep->tx_lock, flags);

    return NETDEV_TX_OK;
}
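/*
 * Note: the buffer address and length are written to the descriptor
 * before the final CBDW_SC() sets BD_ENET_TX_READY, so the controller
 * can never start transmitting from a half-initialized descriptor.
 */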
static void fs_timeout(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;
    int wake = 0;

    fep->stats.tx_errors++;

    spin_lock_irqsave(&fep->lock, flags);

    if (dev->flags & IFF_UP) {
        phy_stop(fep->phydev);
        (*fep->ops->stop)(dev);
        (*fep->ops->restart)(dev);
        phy_start(fep->phydev);
    }

    phy_start(fep->phydev);
    wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
    spin_unlock_irqrestore(&fep->lock, flags);

    if (wake)
        netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct phy_device *phydev = fep->phydev;
    int new_state = 0;

    if (phydev->link) {
        /* adjust to duplex mode */
        if (phydev->duplex != fep->oldduplex) {
            new_state = 1;
            fep->oldduplex = phydev->duplex;
        }

        if (phydev->speed != fep->oldspeed) {
            new_state = 1;
            fep->oldspeed = phydev->speed;
        }

        if (!fep->oldlink) {
            new_state = 1;
            fep->oldlink = 1;
        }

        if (new_state)
            fep->ops->restart(dev);
    } else if (fep->oldlink) {
        new_state = 1;
        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;
    }

    if (new_state && netif_msg_link(fep))
        phy_print_status(phydev);
}
static void fs_adjust_link(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;

    spin_lock_irqsave(&fep->lock, flags);

    if (fep->ops->adjust_link)
        fep->ops->adjust_link(dev);
    else
        generic_adjust_link(dev);

    spin_unlock_irqrestore(&fep->lock, flags);
}
static int fs_init_phy(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    struct phy_device *phydev;

    fep->oldlink = 0;
    fep->oldspeed = 0;
    fep->oldduplex = -1;
    if (fep->fpi->phy_node)
        phydev = of_phy_connect(dev, fep->fpi->phy_node,
                                &fs_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);
    else
        phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
                                           PHY_INTERFACE_MODE_MII);
    if (!phydev) {
        dev_err(&dev->dev, "Could not attach to PHY\n");
        return -ENODEV;
    }

    fep->phydev = phydev;

    return 0;
}
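/*
 * Note: when the node has no "phy-handle" property, the driver falls
 * back to a "fixed-link" description (see the check in fs_enet_probe()).
 */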
static int fs_enet_open(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    int r;
    int err;

    /* to initialize the fep->cur_rx,... */
    /* not doing this, will cause a crash in fs_enet_rx_napi */
    fs_init_bds(fep->ndev);

    if (fep->fpi->use_napi)
        napi_enable(&fep->napi);

    /* Install our interrupt handler. */
    r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
            "fs_enet-mac", dev);
    if (r != 0) {
        dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
        if (fep->fpi->use_napi)
            napi_disable(&fep->napi);
        return -EINVAL;
    }

    err = fs_init_phy(dev);
    if (err) {
        free_irq(fep->interrupt, dev);
        if (fep->fpi->use_napi)
            napi_disable(&fep->napi);
        return err;
    }
    phy_start(fep->phydev);

    netif_start_queue(dev);

    return 0;
}
static int fs_enet_close(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;

    netif_stop_queue(dev);
    netif_carrier_off(dev);
    if (fep->fpi->use_napi)
        napi_disable(&fep->napi);
    phy_stop(fep->phydev);

    spin_lock_irqsave(&fep->lock, flags);
    spin_lock(&fep->tx_lock);
    (*fep->ops->stop)(dev);
    spin_unlock(&fep->tx_lock);
    spin_unlock_irqrestore(&fep->lock, flags);

    /* release any irqs */
    phy_disconnect(fep->phydev);
    fep->phydev = NULL;
    free_irq(fep->interrupt, dev);

    return 0;
}
static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    return &fep->stats;
}

/*************************************************************************/
static void fs_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
{
    strcpy(info->driver, DRV_MODULE_NAME);
    strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    unsigned long flags;
    int r, len;

    len = regs->len;

    spin_lock_irqsave(&fep->lock, flags);
    r = (*fep->ops->get_regs)(dev, p, &len);
    spin_unlock_irqrestore(&fep->lock, flags);

    if (r == 0)
        regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!fep->phydev)
        return -ENODEV;

    return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!fep->phydev)
        return -ENODEV;

    return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
    return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
    struct fs_enet_private *fep = netdev_priv(dev);
    fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
    .get_drvinfo = fs_get_drvinfo,
    .get_regs_len = fs_get_regs_len,
    .get_settings = fs_get_settings,
    .set_settings = fs_set_settings,
    .nway_reset = fs_nway_reset,
    .get_link = ethtool_op_get_link,
    .get_msglevel = fs_get_msglevel,
    .set_msglevel = fs_set_msglevel,
    .get_regs = fs_get_regs,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct fs_enet_private *fep = netdev_priv(dev);

    if (!netif_running(dev))
        return -EINVAL;

    return phy_mii_ioctl(fep->phydev, rq, cmd);
}
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif
static const struct net_device_ops fs_enet_netdev_ops = {
    .ndo_open = fs_enet_open,
    .ndo_stop = fs_enet_close,
    .ndo_get_stats = fs_enet_get_stats,
    .ndo_start_xmit = fs_enet_start_xmit,
    .ndo_tx_timeout = fs_timeout,
    .ndo_set_multicast_list = fs_set_multicast_list,
    .ndo_do_ioctl = fs_ioctl,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = fs_enet_netpoll,
#endif
};
static struct of_device_id fs_enet_match[];
static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
    const struct of_device_id *match;
    struct net_device *ndev;
    struct fs_enet_private *fep;
    struct fs_platform_info *fpi;
    const u32 *data;
    const u8 *mac_addr;
    int privsize, len, ret = -ENODEV;

    match = of_match_device(fs_enet_match, &ofdev->dev);
    if (!match)
        return -EINVAL;

    fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
    if (!fpi)
        return -ENOMEM;

    if (!IS_FEC(match)) {
        data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
        if (!data || len != 4)
            goto out_free_fpi;

        fpi->cp_command = *data;
    }

    fpi->rx_ring = 32;
    fpi->tx_ring = 32;
    fpi->rx_copybreak = 240;
    fpi->use_napi = 1;
    fpi->napi_weight = 17;
    fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
    if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
                                              NULL)))
        goto out_free_fpi;

    privsize = sizeof(*fep) +
               sizeof(struct sk_buff **) *
               (fpi->rx_ring + fpi->tx_ring);

    ndev = alloc_etherdev(privsize);
    if (!ndev) {
        ret = -ENOMEM;
        goto out_put;
    }

    SET_NETDEV_DEV(ndev, &ofdev->dev);
    dev_set_drvdata(&ofdev->dev, ndev);

    fep = netdev_priv(ndev);
    fep->dev = &ofdev->dev;
    fep->ndev = ndev;
    fep->fpi = fpi;
    fep->ops = match->data;

    ret = fep->ops->setup_data(ndev);
    if (ret)
        goto out_free_dev;

    fep->rx_skbuff = (struct sk_buff **)&fep[1];
    fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

    spin_lock_init(&fep->lock);
    spin_lock_init(&fep->tx_lock);

    mac_addr = of_get_mac_address(ofdev->dev.of_node);
    if (mac_addr)
        memcpy(ndev->dev_addr, mac_addr, 6);

    ret = fep->ops->allocate_bd(ndev);
    if (ret)
        goto out_cleanup_data;

    fep->rx_bd_base = fep->ring_base;
    fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

    fep->tx_ring = fpi->tx_ring;
    fep->rx_ring = fpi->rx_ring;

    ndev->netdev_ops = &fs_enet_netdev_ops;
    ndev->watchdog_timeo = 2 * HZ;
    if (fpi->use_napi)
        netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
                       fpi->napi_weight);

    ndev->ethtool_ops = &fs_ethtool_ops;

    init_timer(&fep->phy_timer_list);

    netif_carrier_off(ndev);

    ret = register_netdev(ndev);
    if (ret)
        goto out_free_bd;

    pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

    return 0;

out_free_bd:
    fep->ops->free_bd(ndev);
out_cleanup_data:
    fep->ops->cleanup_data(ndev);
out_free_dev:
    free_netdev(ndev);
    dev_set_drvdata(&ofdev->dev, NULL);
out_put:
    of_node_put(fpi->phy_node);
out_free_fpi:
    kfree(fpi);
    return ret;
}
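/*
 * Note: the error labels above unwind in exact reverse order of the
 * setup steps, so each failure path releases only what was acquired
 * before the failure.
 */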
static int fs_enet_remove(struct platform_device *ofdev)
{
    struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
    struct fs_enet_private *fep = netdev_priv(ndev);

    unregister_netdev(ndev);

    fep->ops->free_bd(ndev);
    fep->ops->cleanup_data(ndev);
    dev_set_drvdata(fep->dev, NULL);
    of_node_put(fep->fpi->phy_node);
    kfree(fep->fpi);
    free_netdev(ndev);
    return 0;
}
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
    {
        .compatible = "fsl,cpm1-scc-enet",
        .data = (void *)&fs_scc_ops,
    },
    {
        .compatible = "fsl,cpm2-scc-enet",
        .data = (void *)&fs_scc_ops,
    },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
    {
        .compatible = "fsl,cpm2-fcc-enet",
        .data = (void *)&fs_fcc_ops,
    },
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
    {
        .compatible = "fsl,mpc5121-fec",
        .data = (void *)&fs_fec_ops,
    },
#else
    {
        .compatible = "fsl,pq1-fec-enet",
        .data = (void *)&fs_fec_ops,
    },
#endif
#endif
    {}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
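/*
 * Example device-tree node (illustrative only; the exact unit address,
 * command value and phy label depend on the board dts):
 *
 *	ethernet@... {
 *		compatible = "fsl,cpm2-fcc-enet";
 *		fsl,cpm-command = <...>;
 *		phy-handle = <&phy0>;
 *	};
 */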
static struct platform_driver fs_enet_driver = {
    .driver = {
        .owner = THIS_MODULE,
        .name = "fs_enet",
        .of_match_table = fs_enet_match,
    },
    .probe = fs_enet_probe,
    .remove = fs_enet_remove,
};

static int __init fs_init(void)
{
    return platform_driver_register(&fs_enet_driver);
}

static void __exit fs_cleanup(void)
{
    platform_driver_unregister(&fs_enet_driver);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
    disable_irq(dev->irq);
    fs_enet_interrupt(dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);