2
* PXA168 ethernet driver.
3
* Most of the code is derived from mv643xx ethernet driver.
5
* Copyright (C) 2010 Marvell International Ltd.
6
* Sachin Sanap <ssanap@marvell.com>
7
* Zhangfei Gao <zgao6@marvell.com>
8
* Philip Rakity <prakity@marvell.com>
9
* Mark Brown <markb@marvell.com>
11
* This program is free software; you can redistribute it and/or
12
* modify it under the terms of the GNU General Public License
13
* as published by the Free Software Foundation; either version 2
14
* of the License, or (at your option) any later version.
16
* This program is distributed in the hope that it will be useful,
17
* but WITHOUT ANY WARRANTY; without even the implied warranty of
18
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
* GNU General Public License for more details.
21
* You should have received a copy of the GNU General Public License
22
* along with this program; if not, write to the Free Software
23
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26
#include <linux/init.h>
27
#include <linux/dma-mapping.h>
30
#include <linux/tcp.h>
31
#include <linux/udp.h>
32
#include <linux/etherdevice.h>
33
#include <linux/bitops.h>
34
#include <linux/delay.h>
35
#include <linux/ethtool.h>
36
#include <linux/platform_device.h>
37
#include <linux/module.h>
38
#include <linux/kernel.h>
39
#include <linux/workqueue.h>
40
#include <linux/clk.h>
41
#include <linux/phy.h>
43
#include <linux/types.h>
44
#include <asm/pgtable.h>
45
#include <asm/system.h>
46
#include <asm/cacheflush.h>
47
#include <linux/pxa168_eth.h>
49
#define DRIVER_NAME "pxa168-eth"
50
#define DRIVER_VERSION "0.3"
56
#define PHY_ADDRESS 0x0000
58
#define PORT_CONFIG 0x0400
59
#define PORT_CONFIG_EXT 0x0408
60
#define PORT_COMMAND 0x0410
61
#define PORT_STATUS 0x0418
63
#define SDMA_CONFIG 0x0440
64
#define SDMA_CMD 0x0448
65
#define INT_CAUSE 0x0450
66
#define INT_W_CLEAR 0x0454
67
#define INT_MASK 0x0458
68
#define ETH_F_RX_DESC_0 0x0480
69
#define ETH_C_RX_DESC_0 0x04A0
70
#define ETH_C_TX_DESC_1 0x04E4
73
#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
74
#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
75
#define SMI_OP_W (0 << 26) /* Write operation */
76
#define SMI_OP_R (1 << 26) /* Read operation */
78
#define PHY_WAIT_ITERATIONS 10
80
#define PXA168_ETH_PHY_ADDR_DEFAULT 0
81
/* RX & TX descriptor command */
82
#define BUF_OWNED_BY_DMA (1 << 31)
84
/* RX descriptor status */
85
#define RX_EN_INT (1 << 23)
86
#define RX_FIRST_DESC (1 << 17)
87
#define RX_LAST_DESC (1 << 16)
88
#define RX_ERROR (1 << 15)
90
/* TX descriptor command */
91
#define TX_EN_INT (1 << 23)
92
#define TX_GEN_CRC (1 << 22)
93
#define TX_ZERO_PADDING (1 << 18)
94
#define TX_FIRST_DESC (1 << 17)
95
#define TX_LAST_DESC (1 << 16)
96
#define TX_ERROR (1 << 15)
99
#define SDMA_CMD_AT (1 << 31)
100
#define SDMA_CMD_TXDL (1 << 24)
101
#define SDMA_CMD_TXDH (1 << 23)
102
#define SDMA_CMD_AR (1 << 15)
103
#define SDMA_CMD_ERD (1 << 7)
105
/* Bit definitions of the Port Config Reg */
106
#define PCR_HS (1 << 12)
107
#define PCR_EN (1 << 7)
108
#define PCR_PM (1 << 0)
110
/* Bit definitions of the Port Config Extend Reg */
111
#define PCXR_2BSM (1 << 28)
112
#define PCXR_DSCP_EN (1 << 21)
113
#define PCXR_MFL_1518 (0 << 14)
114
#define PCXR_MFL_1536 (1 << 14)
115
#define PCXR_MFL_2048 (2 << 14)
116
#define PCXR_MFL_64K (3 << 14)
117
#define PCXR_FLP (1 << 11)
118
#define PCXR_PRIO_TX_OFF 3
119
#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
121
/* Bit definitions of the SDMA Config Reg */
122
#define SDCR_BSZ_OFF 12
123
#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
124
#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
125
#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
126
#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
127
#define SDCR_BLMR (1 << 6)
128
#define SDCR_BLMT (1 << 7)
129
#define SDCR_RIFB (1 << 9)
130
#define SDCR_RC_OFF 2
131
#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
134
* Bit definitions of the Interrupt Cause Reg
135
* and Interrupt MASK Reg is the same
137
#define ICR_RXBUF (1 << 0)
138
#define ICR_TXBUF_H (1 << 2)
139
#define ICR_TXBUF_L (1 << 3)
140
#define ICR_TXEND_H (1 << 6)
141
#define ICR_TXEND_L (1 << 7)
142
#define ICR_RXERR (1 << 8)
143
#define ICR_TXERR_H (1 << 10)
144
#define ICR_TXERR_L (1 << 11)
145
#define ICR_TX_UDR (1 << 13)
146
#define ICR_MII_CH (1 << 28)
148
#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
149
ICR_TXERR_H | ICR_TXERR_L |\
150
ICR_TXEND_H | ICR_TXEND_L |\
151
ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
153
#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
155
#define NUM_RX_DESCS 64
156
#define NUM_TX_DESCS 64
159
#define HASH_DELETE 1
160
#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
161
#define HOP_NUMBER 12
163
/* Bit definitions for Port status */
164
#define PORT_SPEED_100 (1 << 0)
165
#define FULL_DUPLEX (1 << 1)
166
#define FLOW_CONTROL_ENABLED (1 << 2)
167
#define LINK_UP (1 << 3)
169
/* Bit definitions for work to be done */
170
#define WORK_LINK (1 << 0)
171
#define WORK_TX_DONE (1 << 1)
176
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
179
u32 cmd_sts; /* Descriptor command status */
180
u16 byte_cnt; /* Descriptor buffer byte count */
181
u16 buf_size; /* Buffer size */
182
u32 buf_ptr; /* Descriptor buffer pointer */
183
u32 next_desc_ptr; /* Next descriptor pointer */
187
u32 cmd_sts; /* Command/status field */
189
u16 byte_cnt; /* buffer byte count */
190
u32 buf_ptr; /* pointer to buffer for this descriptor */
191
u32 next_desc_ptr; /* Pointer to next descriptor */
194
struct pxa168_eth_private {
195
int port_num; /* User Ethernet port number */
197
int rx_resource_err; /* Rx ring resource error flag */
199
/* Next available and first returning Rx resource */
200
int rx_curr_desc_q, rx_used_desc_q;
202
/* Next available and first returning Tx resource */
203
int tx_curr_desc_q, tx_used_desc_q;
205
struct rx_desc *p_rx_desc_area;
206
dma_addr_t rx_desc_dma;
207
int rx_desc_area_size;
208
struct sk_buff **rx_skb;
210
struct tx_desc *p_tx_desc_area;
211
dma_addr_t tx_desc_dma;
212
int tx_desc_area_size;
213
struct sk_buff **tx_skb;
215
struct work_struct tx_timeout_task;
217
struct net_device *dev;
218
struct napi_struct napi;
222
struct net_device_stats stats;
223
/* Size of Tx Ring per queue */
225
/* Number of tx descriptors in use */
227
/* Size of Rx Ring per queue */
229
/* Number of rx descriptors in use */
233
* Used in case RX Ring is empty, which can occur when
234
* system does not have resources (skb's)
236
struct timer_list timeout;
237
struct mii_bus *smi_bus;
238
struct phy_device *phy;
242
struct pxa168_eth_platform_data *pd;
244
* Ethernet controller base address.
248
/* Pointer to the hardware address filter table */
253
struct addr_table_entry {
258
/* Bit fields of a Hash Table Entry */
259
enum hash_table_entry {
260
HASH_ENTRY_VALID = 1,
262
HASH_ENTRY_RECEIVE_DISCARD = 4,
263
HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
266
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
267
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
268
static int pxa168_init_hw(struct pxa168_eth_private *pep);
269
static void eth_port_reset(struct net_device *dev);
270
static void eth_port_start(struct net_device *dev);
271
static int pxa168_eth_open(struct net_device *dev);
272
static int pxa168_eth_stop(struct net_device *dev);
273
static int ethernet_phy_setup(struct net_device *dev);
275
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
277
return readl(pep->base + offset);
280
/* Write a 32-bit value to the controller register at @offset. */
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}
285
static void abort_dma(struct pxa168_eth_private *pep)
288
int max_retries = 40;
291
wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
295
while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
299
} while (max_retries-- > 0 && delay <= 0);
301
if (max_retries <= 0)
302
printk(KERN_ERR "%s : DMA Stuck\n", __func__);
305
static int ethernet_phy_get(struct pxa168_eth_private *pep)
307
unsigned int reg_data;
309
reg_data = rdl(pep, PHY_ADDRESS);
311
return (reg_data >> (5 * pep->port_num)) & 0x1f;
314
static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
317
int addr_shift = 5 * pep->port_num;
319
reg_data = rdl(pep, PHY_ADDRESS);
320
reg_data &= ~(0x1f << addr_shift);
321
reg_data |= (phy_addr & 0x1f) << addr_shift;
322
wrl(pep, PHY_ADDRESS, reg_data);
325
static void ethernet_phy_reset(struct pxa168_eth_private *pep)
329
data = phy_read(pep->phy, MII_BMCR);
334
if (phy_write(pep->phy, MII_BMCR, data) < 0)
338
data = phy_read(pep->phy, MII_BMCR);
339
} while (data >= 0 && data & BMCR_RESET);
342
static void rxq_refill(struct net_device *dev)
344
struct pxa168_eth_private *pep = netdev_priv(dev);
346
struct rx_desc *p_used_rx_desc;
349
while (pep->rx_desc_count < pep->rx_ring_size) {
352
skb = dev_alloc_skb(pep->skb_size);
356
skb_reserve(skb, SKB_DMA_REALIGN);
357
pep->rx_desc_count++;
358
/* Get 'used' Rx descriptor */
359
used_rx_desc = pep->rx_used_desc_q;
360
p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
361
size = skb->end - skb->data;
362
p_used_rx_desc->buf_ptr = dma_map_single(NULL,
366
p_used_rx_desc->buf_size = size;
367
pep->rx_skb[used_rx_desc] = skb;
369
/* Return the descriptor to DMA ownership */
371
p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
374
/* Move the used descriptor pointer to the next descriptor */
375
pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
377
/* Any Rx return cancels the Rx resource error status */
378
pep->rx_resource_err = 0;
380
skb_reserve(skb, ETH_HW_IP_ALIGN);
384
* If RX ring is empty of SKB, set a timer to try allocating
385
* again at a later time.
387
if (pep->rx_desc_count == 0) {
388
pep->timeout.expires = jiffies + (HZ / 10);
389
add_timer(&pep->timeout);
393
static inline void rxq_refill_timer_wrapper(unsigned long data)
395
struct pxa168_eth_private *pep = (void *)data;
396
napi_schedule(&pep->napi);
399
/* Reverse the bit order within each nibble of @x (bit0<->bit3, bit1<->bit2,
 * bit4<->bit7, bit5<->bit6).  Used when building hardware hash keys.
 */
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}
407
static void nibble_swap_every_byte(unsigned char *mac_addr)
410
for (i = 0; i < ETH_ALEN; i++) {
411
mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
412
((mac_addr[i] & 0xf0) >> 4);
416
static void inverse_every_nibble(unsigned char *mac_addr)
419
for (i = 0; i < ETH_ALEN; i++)
420
mac_addr[i] = flip_8_bits(mac_addr[i]);
424
* ----------------------------------------------------------------------------
425
* This function will calculate the hash function of the address.
427
* mac_addr_orig - MAC address.
429
* return the calculated entry.
431
static u32 hash_function(unsigned char *mac_addr_orig)
438
unsigned char mac_addr[ETH_ALEN];
440
/* Make a copy of MAC address since we are going to performe bit
443
memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
445
nibble_swap_every_byte(mac_addr);
446
inverse_every_nibble(mac_addr);
448
addr0 = (mac_addr[5] >> 2) & 0x3f;
449
addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
450
addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
451
addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
453
hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
454
hash_result = hash_result & 0x07ff;
459
* ----------------------------------------------------------------------------
460
* This function will add/del an entry to the address table.
463
* mac_addr - MAC address.
464
* skip - if 1, skip this address.Used in case of deleting an entry which is a
465
* part of chain in the hash table.We cant just delete the entry since
466
* that will break the chain.We need to defragment the tables time to
468
* rd - 0 Discard packet upon match.
469
* - 1 Receive packet upon match.
471
* address table entry is added/deleted.
473
* -ENOSPC if table full
475
static int add_del_hash_entry(struct pxa168_eth_private *pep,
476
unsigned char *mac_addr,
477
u32 rd, u32 skip, int del)
479
struct addr_table_entry *entry, *start;
484
new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
485
| (((mac_addr[1] >> 0) & 0xf) << 11)
486
| (((mac_addr[0] >> 4) & 0xf) << 7)
487
| (((mac_addr[0] >> 0) & 0xf) << 3)
488
| (((mac_addr[3] >> 4) & 0x1) << 31)
489
| (((mac_addr[3] >> 0) & 0xf) << 27)
490
| (((mac_addr[2] >> 4) & 0xf) << 23)
491
| (((mac_addr[2] >> 0) & 0xf) << 19)
492
| (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
495
new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
496
| (((mac_addr[5] >> 0) & 0xf) << 11)
497
| (((mac_addr[4] >> 4) & 0xf) << 7)
498
| (((mac_addr[4] >> 0) & 0xf) << 3)
499
| (((mac_addr[3] >> 5) & 0x7) << 0);
502
* Pick the appropriate table, start scanning for free/reusable
503
* entries at the index obtained by hashing the specified MAC address
505
start = (struct addr_table_entry *)(pep->htpr);
506
entry = start + hash_function(mac_addr);
507
for (i = 0; i < HOP_NUMBER; i++) {
508
if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
511
/* if same address put in same position */
512
if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
513
(new_low & 0xfffffff8)) &&
514
(le32_to_cpu(entry->hi) == new_high)) {
518
if (entry == start + 0x7ff)
524
if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
525
(le32_to_cpu(entry->hi) != new_high) && del)
528
if (i == HOP_NUMBER) {
530
printk(KERN_INFO "%s: table section is full, need to "
531
"move to 16kB implementation?\n",
539
* Update the selected entry
545
entry->hi = cpu_to_le32(new_high);
546
entry->lo = cpu_to_le32(new_low);
553
* ----------------------------------------------------------------------------
554
* Create an addressTable entry from MAC address info
555
* found in the specifed net_device struct
557
* Input : pointer to ethernet interface network device structure
560
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
561
unsigned char *oaddr,
564
/* Delete old entry */
566
add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
568
add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
571
static int init_hash_table(struct pxa168_eth_private *pep)
574
* Hardware expects CPU to build a hash table based on a predefined
575
* hash function and populate it based on hardware address. The
576
* location of the hash table is identified by 32-bit pointer stored
577
* in HTPR internal register. Two possible sizes exists for the hash
578
* table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
579
* (16kB of DRAM required (4 x 4 kB banks)).We currently only support
582
/* TODO: Add support for 8kB hash table and alternative hash
583
* function.Driver can dynamically switch to them if the 1/2kB hash
586
if (pep->htpr == NULL) {
587
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
588
HASH_ADDR_TABLE_SIZE,
589
&pep->htpr_dma, GFP_KERNEL);
590
if (pep->htpr == NULL)
593
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
594
wrl(pep, HTPR, pep->htpr_dma);
598
static void pxa168_eth_set_rx_mode(struct net_device *dev)
600
struct pxa168_eth_private *pep = netdev_priv(dev);
601
struct netdev_hw_addr *ha;
604
val = rdl(pep, PORT_CONFIG);
605
if (dev->flags & IFF_PROMISC)
609
wrl(pep, PORT_CONFIG, val);
612
* Remove the old list of MAC address and add dev->addr
613
* and multicast address.
615
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
616
update_hash_table_mac_address(pep, NULL, dev->dev_addr);
618
netdev_for_each_mc_addr(ha, dev)
619
update_hash_table_mac_address(pep, NULL, ha->addr);
622
static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
624
struct sockaddr *sa = addr;
625
struct pxa168_eth_private *pep = netdev_priv(dev);
626
unsigned char oldMac[ETH_ALEN];
628
if (!is_valid_ether_addr(sa->sa_data))
630
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
631
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
632
netif_addr_lock_bh(dev);
633
update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
634
netif_addr_unlock_bh(dev);
638
static void eth_port_start(struct net_device *dev)
640
unsigned int val = 0;
641
struct pxa168_eth_private *pep = netdev_priv(dev);
642
int tx_curr_desc, rx_curr_desc;
644
/* Perform PHY reset, if there is a PHY. */
645
if (pep->phy != NULL) {
646
struct ethtool_cmd cmd;
648
pxa168_get_settings(pep->dev, &cmd);
649
ethernet_phy_reset(pep);
650
pxa168_set_settings(pep->dev, &cmd);
653
/* Assignment of Tx CTRP of given queue */
654
tx_curr_desc = pep->tx_curr_desc_q;
655
wrl(pep, ETH_C_TX_DESC_1,
656
(u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
658
/* Assignment of Rx CRDP of given queue */
659
rx_curr_desc = pep->rx_curr_desc_q;
660
wrl(pep, ETH_C_RX_DESC_0,
661
(u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
663
wrl(pep, ETH_F_RX_DESC_0,
664
(u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
666
/* Clear all interrupts */
667
wrl(pep, INT_CAUSE, 0);
669
/* Enable all interrupts for receive, transmit and error. */
670
wrl(pep, INT_MASK, ALL_INTS);
672
val = rdl(pep, PORT_CONFIG);
674
wrl(pep, PORT_CONFIG, val);
676
/* Start RX DMA engine */
677
val = rdl(pep, SDMA_CMD);
679
wrl(pep, SDMA_CMD, val);
682
static void eth_port_reset(struct net_device *dev)
684
struct pxa168_eth_private *pep = netdev_priv(dev);
685
unsigned int val = 0;
687
/* Stop all interrupts for receive, transmit and error. */
688
wrl(pep, INT_MASK, 0);
690
/* Clear all interrupts */
691
wrl(pep, INT_CAUSE, 0);
694
val = rdl(pep, SDMA_CMD);
695
val &= ~SDMA_CMD_ERD; /* abort dma command */
697
/* Abort any transmit and receive operations and put DMA
703
val = rdl(pep, PORT_CONFIG);
705
wrl(pep, PORT_CONFIG, val);
709
* txq_reclaim - Free the tx desc data for completed descriptors
710
* If force is non-zero, frees uncompleted descriptors as well
712
static int txq_reclaim(struct net_device *dev, int force)
714
struct pxa168_eth_private *pep = netdev_priv(dev);
715
struct tx_desc *desc;
725
pep->work_todo &= ~WORK_TX_DONE;
726
while (pep->tx_desc_count > 0) {
727
tx_index = pep->tx_used_desc_q;
728
desc = &pep->p_tx_desc_area[tx_index];
729
cmd_sts = desc->cmd_sts;
730
if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
732
goto txq_reclaim_end;
735
goto txq_reclaim_end;
738
pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
739
pep->tx_desc_count--;
740
addr = desc->buf_ptr;
741
count = desc->byte_cnt;
742
skb = pep->tx_skb[tx_index];
744
pep->tx_skb[tx_index] = NULL;
746
if (cmd_sts & TX_ERROR) {
748
printk(KERN_ERR "%s: Error in TX\n", dev->name);
749
dev->stats.tx_errors++;
751
dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
753
dev_kfree_skb_irq(skb);
757
netif_tx_unlock(dev);
761
static void pxa168_eth_tx_timeout(struct net_device *dev)
763
struct pxa168_eth_private *pep = netdev_priv(dev);
765
printk(KERN_INFO "%s: TX timeout desc_count %d\n",
766
dev->name, pep->tx_desc_count);
768
schedule_work(&pep->tx_timeout_task);
771
static void pxa168_eth_tx_timeout_task(struct work_struct *work)
773
struct pxa168_eth_private *pep = container_of(work,
774
struct pxa168_eth_private,
776
struct net_device *dev = pep->dev;
777
pxa168_eth_stop(dev);
778
pxa168_eth_open(dev);
781
static int rxq_process(struct net_device *dev, int budget)
783
struct pxa168_eth_private *pep = netdev_priv(dev);
784
struct net_device_stats *stats = &dev->stats;
785
unsigned int received_packets = 0;
788
while (budget-- > 0) {
789
int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
790
struct rx_desc *rx_desc;
791
unsigned int cmd_sts;
793
/* Do not process Rx ring in case of Rx ring resource error */
794
if (pep->rx_resource_err)
796
rx_curr_desc = pep->rx_curr_desc_q;
797
rx_used_desc = pep->rx_used_desc_q;
798
rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
799
cmd_sts = rx_desc->cmd_sts;
801
if (cmd_sts & (BUF_OWNED_BY_DMA))
803
skb = pep->rx_skb[rx_curr_desc];
804
pep->rx_skb[rx_curr_desc] = NULL;
806
rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
807
pep->rx_curr_desc_q = rx_next_curr_desc;
809
/* Rx descriptors exhausted. */
810
/* Set the Rx ring resource error flag */
811
if (rx_next_curr_desc == rx_used_desc)
812
pep->rx_resource_err = 1;
813
pep->rx_desc_count--;
814
dma_unmap_single(NULL, rx_desc->buf_ptr,
820
* Note byte count includes 4 byte CRC count
823
stats->rx_bytes += rx_desc->byte_cnt;
825
* In case received a packet without first / last bits on OR
826
* the error summary bit is on, the packets needs to be droped.
828
if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
829
(RX_FIRST_DESC | RX_LAST_DESC))
830
|| (cmd_sts & RX_ERROR)) {
833
if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
834
(RX_FIRST_DESC | RX_LAST_DESC)) {
837
"%s: Rx pkt on multiple desc\n",
840
if (cmd_sts & RX_ERROR)
842
dev_kfree_skb_irq(skb);
845
* The -4 is for the CRC in the trailer of the
848
skb_put(skb, rx_desc->byte_cnt - 4);
849
skb->protocol = eth_type_trans(skb, dev);
850
netif_receive_skb(skb);
853
/* Fill RX ring with skb's */
855
return received_packets;
858
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
859
struct net_device *dev)
864
icr = rdl(pep, INT_CAUSE);
868
wrl(pep, INT_CAUSE, ~icr);
869
if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
870
pep->work_todo |= WORK_TX_DONE;
875
if (icr & ICR_MII_CH) {
876
pep->work_todo |= WORK_LINK;
882
static void handle_link_event(struct pxa168_eth_private *pep)
884
struct net_device *dev = pep->dev;
890
port_status = rdl(pep, PORT_STATUS);
891
if (!(port_status & LINK_UP)) {
892
if (netif_carrier_ok(dev)) {
893
printk(KERN_INFO "%s: link down\n", dev->name);
894
netif_carrier_off(dev);
899
if (port_status & PORT_SPEED_100)
904
duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
905
fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
906
printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
907
"flow control %sabled\n", dev->name,
908
speed, duplex ? "full" : "half", fc ? "en" : "dis");
909
if (!netif_carrier_ok(dev))
910
netif_carrier_on(dev);
913
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
915
struct net_device *dev = (struct net_device *)dev_id;
916
struct pxa168_eth_private *pep = netdev_priv(dev);
918
if (unlikely(!pxa168_eth_collect_events(pep, dev)))
920
/* Disable interrupts */
921
wrl(pep, INT_MASK, 0);
922
napi_schedule(&pep->napi);
926
static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
931
* Reserve 2+14 bytes for an ethernet header (the hardware
932
* automatically prepends 2 bytes of dummy data to each
933
* received packet), 16 bytes for up to four VLAN tags, and
934
* 4 bytes for the trailing FCS -- 36 bytes total.
936
skb_size = pep->dev->mtu + 36;
939
* Make sure that the skb size is a multiple of 8 bytes, as
940
* the lower three bits of the receive descriptor's buffer
941
* size field are ignored by the hardware.
943
pep->skb_size = (skb_size + 7) & ~7;
946
* If NET_SKB_PAD is smaller than a cache line,
947
* netdev_alloc_skb() will cause skb->data to be misaligned
948
* to a cache line boundary. If this is the case, include
949
* some extra space to allow re-aligning the data area.
951
pep->skb_size += SKB_DMA_REALIGN;
955
static int set_port_config_ext(struct pxa168_eth_private *pep)
959
pxa168_eth_recalc_skb_size(pep);
960
if (pep->skb_size <= 1518)
961
skb_size = PCXR_MFL_1518;
962
else if (pep->skb_size <= 1536)
963
skb_size = PCXR_MFL_1536;
964
else if (pep->skb_size <= 2048)
965
skb_size = PCXR_MFL_2048;
967
skb_size = PCXR_MFL_64K;
969
/* Extended Port Configuration */
971
PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
972
PCXR_DSCP_EN | /* Enable DSCP in IP */
973
skb_size | PCXR_FLP | /* do not force link pass */
974
PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
979
static int pxa168_init_hw(struct pxa168_eth_private *pep)
983
/* Disable interrupts */
984
wrl(pep, INT_MASK, 0);
985
wrl(pep, INT_CAUSE, 0);
986
/* Write to ICR to clear interrupts. */
987
wrl(pep, INT_W_CLEAR, 0);
988
/* Abort any transmit and receive operations and put DMA
992
/* Initialize address hash table */
993
err = init_hash_table(pep);
996
/* SDMA configuration */
997
wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
998
SDCR_RIFB | /* Rx interrupt on frame */
999
SDCR_BLMT | /* Little endian transmit */
1000
SDCR_BLMR | /* Little endian receive */
1001
SDCR_RC_MAX_RETRANS); /* Max retransmit count */
1002
/* Port Configuration */
1003
wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
1004
set_port_config_ext(pep);
1009
static int rxq_init(struct net_device *dev)
1011
struct pxa168_eth_private *pep = netdev_priv(dev);
1012
struct rx_desc *p_rx_desc;
1013
int size = 0, i = 0;
1014
int rx_desc_num = pep->rx_ring_size;
1016
/* Allocate RX skb rings */
1017
pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1020
printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
1023
/* Allocate RX ring */
1024
pep->rx_desc_count = 0;
1025
size = pep->rx_ring_size * sizeof(struct rx_desc);
1026
pep->rx_desc_area_size = size;
1027
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1028
&pep->rx_desc_dma, GFP_KERNEL);
1029
if (!pep->p_rx_desc_area) {
1030
printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
1034
memset((void *)pep->p_rx_desc_area, 0, size);
1035
/* initialize the next_desc_ptr links in the Rx descriptors ring */
1036
p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
1037
for (i = 0; i < rx_desc_num; i++) {
1038
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1039
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1041
/* Save Rx desc pointer to driver struct. */
1042
pep->rx_curr_desc_q = 0;
1043
pep->rx_used_desc_q = 0;
1044
pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1051
static void rxq_deinit(struct net_device *dev)
1053
struct pxa168_eth_private *pep = netdev_priv(dev);
1056
/* Free preallocated skb's on RX rings */
1057
for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1058
if (pep->rx_skb[curr]) {
1059
dev_kfree_skb(pep->rx_skb[curr]);
1060
pep->rx_desc_count--;
1063
if (pep->rx_desc_count)
1065
"Error in freeing Rx Ring. %d skb's still\n",
1066
pep->rx_desc_count);
1068
if (pep->p_rx_desc_area)
1069
dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1070
pep->p_rx_desc_area, pep->rx_desc_dma);
1074
static int txq_init(struct net_device *dev)
1076
struct pxa168_eth_private *pep = netdev_priv(dev);
1077
struct tx_desc *p_tx_desc;
1078
int size = 0, i = 0;
1079
int tx_desc_num = pep->tx_ring_size;
1081
pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1084
printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
1087
/* Allocate TX ring */
1088
pep->tx_desc_count = 0;
1089
size = pep->tx_ring_size * sizeof(struct tx_desc);
1090
pep->tx_desc_area_size = size;
1091
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1092
&pep->tx_desc_dma, GFP_KERNEL);
1093
if (!pep->p_tx_desc_area) {
1094
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
1098
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1099
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
1100
p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
1101
for (i = 0; i < tx_desc_num; i++) {
1102
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1103
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1105
pep->tx_curr_desc_q = 0;
1106
pep->tx_used_desc_q = 0;
1107
pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1114
static void txq_deinit(struct net_device *dev)
1116
struct pxa168_eth_private *pep = netdev_priv(dev);
1118
/* Free outstanding skb's on TX ring */
1119
txq_reclaim(dev, 1);
1120
BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1122
if (pep->p_tx_desc_area)
1123
dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1124
pep->p_tx_desc_area, pep->tx_desc_dma);
1128
static int pxa168_eth_open(struct net_device *dev)
1130
struct pxa168_eth_private *pep = netdev_priv(dev);
1133
err = request_irq(dev->irq, pxa168_eth_int_handler,
1134
IRQF_DISABLED, dev->name, dev);
1136
dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
1139
pep->rx_resource_err = 0;
1140
err = rxq_init(dev);
1143
err = txq_init(dev);
1145
goto out_free_rx_skb;
1146
pep->rx_used_desc_q = 0;
1147
pep->rx_curr_desc_q = 0;
1149
/* Fill RX ring with skb's */
1151
pep->rx_used_desc_q = 0;
1152
pep->rx_curr_desc_q = 0;
1153
netif_carrier_off(dev);
1154
eth_port_start(dev);
1155
napi_enable(&pep->napi);
1160
free_irq(dev->irq, dev);
1164
static int pxa168_eth_stop(struct net_device *dev)
1166
struct pxa168_eth_private *pep = netdev_priv(dev);
1167
eth_port_reset(dev);
1169
/* Disable interrupts */
1170
wrl(pep, INT_MASK, 0);
1171
wrl(pep, INT_CAUSE, 0);
1172
/* Write to ICR to clear interrupts. */
1173
wrl(pep, INT_W_CLEAR, 0);
1174
napi_disable(&pep->napi);
1175
del_timer_sync(&pep->timeout);
1176
netif_carrier_off(dev);
1177
free_irq(dev->irq, dev);
1184
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
1187
struct pxa168_eth_private *pep = netdev_priv(dev);
1189
if ((mtu > 9500) || (mtu < 68))
1193
retval = set_port_config_ext(pep);
1195
if (!netif_running(dev))
1199
* Stop and then re-open the interface. This will allocate RX
1200
* skbs of the new MTU.
1201
* There is a possible danger that the open will not succeed,
1202
* due to memory being full.
1204
pxa168_eth_stop(dev);
1205
if (pxa168_eth_open(dev)) {
1206
dev_printk(KERN_ERR, &dev->dev,
1207
"fatal error on re-opening device after "
1214
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
1218
tx_desc_curr = pep->tx_curr_desc_q;
1219
pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
1220
BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
1221
pep->tx_desc_count++;
1223
return tx_desc_curr;
1226
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
1228
struct pxa168_eth_private *pep =
1229
container_of(napi, struct pxa168_eth_private, napi);
1230
struct net_device *dev = pep->dev;
1233
if (unlikely(pep->work_todo & WORK_LINK)) {
1234
pep->work_todo &= ~(WORK_LINK);
1235
handle_link_event(pep);
1238
* We call txq_reclaim every time since in NAPI interupts are disabled
1239
* and due to this we miss the TX_DONE interrupt,which is not updated in
1240
* interrupt status register.
1242
txq_reclaim(dev, 0);
1243
if (netif_queue_stopped(dev)
1244
&& pep->tx_ring_size - pep->tx_desc_count > 1) {
1245
netif_wake_queue(dev);
1247
work_done = rxq_process(dev, budget);
1248
if (work_done < budget) {
1249
napi_complete(napi);
1250
wrl(pep, INT_MASK, ALL_INTS);
1256
static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1258
struct pxa168_eth_private *pep = netdev_priv(dev);
1259
struct net_device_stats *stats = &dev->stats;
1260
struct tx_desc *desc;
1264
tx_index = eth_alloc_tx_desc_index(pep);
1265
desc = &pep->p_tx_desc_area[tx_index];
1267
pep->tx_skb[tx_index] = skb;
1268
desc->byte_cnt = length;
1269
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1271
desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
1272
TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
1274
wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1276
stats->tx_bytes += skb->len;
1277
stats->tx_packets++;
1278
dev->trans_start = jiffies;
1279
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
1280
/* We handled the current skb, but now we are out of space.*/
1281
netif_stop_queue(dev);
1284
return NETDEV_TX_OK;
1287
static int smi_wait_ready(struct pxa168_eth_private *pep)
1291
/* wait for the SMI register to become available */
1292
for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
1293
if (i == PHY_WAIT_ITERATIONS)
1301
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
1303
struct pxa168_eth_private *pep = bus->priv;
1307
if (smi_wait_ready(pep)) {
1308
printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1311
wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
1312
/* now wait for the data to be valid */
1313
for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
1314
if (i == PHY_WAIT_ITERATIONS) {
1316
"pxa168_eth: SMI bus read not valid\n");
1322
return val & 0xffff;
1325
static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
1328
struct pxa168_eth_private *pep = bus->priv;
1330
if (smi_wait_ready(pep)) {
1331
printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1335
wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
1336
SMI_OP_W | (value & 0xffff));
1338
if (smi_wait_ready(pep)) {
1339
printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
1346
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1349
struct pxa168_eth_private *pep = netdev_priv(dev);
1350
if (pep->phy != NULL)
1351
return phy_mii_ioctl(pep->phy, ifr, cmd);
1356
static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
1358
struct mii_bus *bus = pep->smi_bus;
1359
struct phy_device *phydev;
1364
if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
1365
/* Scan entire range */
1366
start = ethernet_phy_get(pep);
1369
/* Use phy addr specific to platform */
1370
start = phy_addr & 0x1f;
1374
for (i = 0; i < num; i++) {
1375
int addr = (start + i) & 0x1f;
1376
if (bus->phy_map[addr] == NULL)
1377
mdiobus_scan(bus, addr);
1379
if (phydev == NULL) {
1380
phydev = bus->phy_map[addr];
1382
ethernet_phy_set_addr(pep, addr);
1389
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
1391
struct phy_device *phy = pep->phy;
1392
ethernet_phy_reset(pep);
1394
phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
1397
phy->autoneg = AUTONEG_ENABLE;
1400
phy->supported &= PHY_BASIC_FEATURES;
1401
phy->advertising = phy->supported | ADVERTISED_Autoneg;
1403
phy->autoneg = AUTONEG_DISABLE;
1404
phy->advertising = 0;
1406
phy->duplex = duplex;
1408
phy_start_aneg(phy);
1411
static int ethernet_phy_setup(struct net_device *dev)
1413
struct pxa168_eth_private *pep = netdev_priv(dev);
1417
pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
1418
if (pep->phy != NULL)
1419
phy_init(pep, pep->pd->speed, pep->pd->duplex);
1420
update_hash_table_mac_address(pep, NULL, dev->dev_addr);
1425
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1427
struct pxa168_eth_private *pep = netdev_priv(dev);
1430
err = phy_read_status(pep->phy);
1432
err = phy_ethtool_gset(pep->phy, cmd);
1437
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1439
struct pxa168_eth_private *pep = netdev_priv(dev);
1441
return phy_ethtool_sset(pep->phy, cmd);
1444
static void pxa168_get_drvinfo(struct net_device *dev,
1445
struct ethtool_drvinfo *info)
1447
strncpy(info->driver, DRIVER_NAME, 32);
1448
strncpy(info->version, DRIVER_VERSION, 32);
1449
strncpy(info->fw_version, "N/A", 32);
1450
strncpy(info->bus_info, "N/A", 32);
1453
static const struct ethtool_ops pxa168_ethtool_ops = {
1454
.get_settings = pxa168_get_settings,
1455
.set_settings = pxa168_set_settings,
1456
.get_drvinfo = pxa168_get_drvinfo,
1457
.get_link = ethtool_op_get_link,
1460
static const struct net_device_ops pxa168_eth_netdev_ops = {
1461
.ndo_open = pxa168_eth_open,
1462
.ndo_stop = pxa168_eth_stop,
1463
.ndo_start_xmit = pxa168_eth_start_xmit,
1464
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
1465
.ndo_set_mac_address = pxa168_eth_set_mac_address,
1466
.ndo_validate_addr = eth_validate_addr,
1467
.ndo_do_ioctl = pxa168_eth_do_ioctl,
1468
.ndo_change_mtu = pxa168_eth_change_mtu,
1469
.ndo_tx_timeout = pxa168_eth_tx_timeout,
1472
static int pxa168_eth_probe(struct platform_device *pdev)
1474
struct pxa168_eth_private *pep = NULL;
1475
struct net_device *dev = NULL;
1476
struct resource *res;
1480
printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
1482
clk = clk_get(&pdev->dev, "MFUCLK");
1484
printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
1490
dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
1496
platform_set_drvdata(pdev, dev);
1497
pep = netdev_priv(dev);
1500
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1505
pep->base = ioremap(res->start, res->end - res->start + 1);
1506
if (pep->base == NULL) {
1510
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1512
dev->irq = res->start;
1513
dev->netdev_ops = &pxa168_eth_netdev_ops;
1514
dev->watchdog_timeo = 2 * HZ;
1516
SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
1518
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1520
printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
1521
random_ether_addr(dev->dev_addr);
1523
pep->pd = pdev->dev.platform_data;
1524
pep->rx_ring_size = NUM_RX_DESCS;
1525
if (pep->pd->rx_queue_size)
1526
pep->rx_ring_size = pep->pd->rx_queue_size;
1528
pep->tx_ring_size = NUM_TX_DESCS;
1529
if (pep->pd->tx_queue_size)
1530
pep->tx_ring_size = pep->pd->tx_queue_size;
1532
pep->port_num = pep->pd->port_number;
1533
/* Hardware supports only 3 ports */
1534
BUG_ON(pep->port_num > 2);
1535
netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
1537
memset(&pep->timeout, 0, sizeof(struct timer_list));
1538
init_timer(&pep->timeout);
1539
pep->timeout.function = rxq_refill_timer_wrapper;
1540
pep->timeout.data = (unsigned long)pep;
1542
pep->smi_bus = mdiobus_alloc();
1543
if (pep->smi_bus == NULL) {
1547
pep->smi_bus->priv = pep;
1548
pep->smi_bus->name = "pxa168_eth smi";
1549
pep->smi_bus->read = pxa168_smi_read;
1550
pep->smi_bus->write = pxa168_smi_write;
1551
snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
1552
pep->smi_bus->parent = &pdev->dev;
1553
pep->smi_bus->phy_mask = 0xffffffff;
1554
err = mdiobus_register(pep->smi_bus);
1558
pxa168_init_hw(pep);
1559
err = ethernet_phy_setup(dev);
1562
SET_NETDEV_DEV(dev, &pdev->dev);
1563
err = register_netdev(dev);
1569
mdiobus_unregister(pep->smi_bus);
1571
mdiobus_free(pep->smi_bus);
1582
static int pxa168_eth_remove(struct platform_device *pdev)
1584
struct net_device *dev = platform_get_drvdata(pdev);
1585
struct pxa168_eth_private *pep = netdev_priv(dev);
1588
dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
1589
pep->htpr, pep->htpr_dma);
1593
clk_disable(pep->clk);
1597
if (pep->phy != NULL)
1598
phy_detach(pep->phy);
1602
mdiobus_unregister(pep->smi_bus);
1603
mdiobus_free(pep->smi_bus);
1604
unregister_netdev(dev);
1605
cancel_work_sync(&pep->tx_timeout_task);
1607
platform_set_drvdata(pdev, NULL);
1611
/* Quiesce the MAC on system shutdown. */
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}
#ifdef CONFIG_PM
/* Power management is not implemented for this driver. */
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif
static struct platform_driver pxa168_eth_driver = {
1634
.probe = pxa168_eth_probe,
1635
.remove = pxa168_eth_remove,
1636
.shutdown = pxa168_eth_shutdown,
1637
.resume = pxa168_eth_resume,
1638
.suspend = pxa168_eth_suspend,
1640
.name = DRIVER_NAME,
1644
static int __init pxa168_init_module(void)
1646
return platform_driver_register(&pxa168_eth_driver);
1649
static void __exit pxa168_cleanup_module(void)
1651
platform_driver_unregister(&pxa168_eth_driver);
1654
module_init(pxa168_init_module);
1655
module_exit(pxa168_cleanup_module);
1657
MODULE_LICENSE("GPL");
1658
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
1659
MODULE_ALIAS("platform:pxa168_eth");