~ubuntu-branches/ubuntu/precise/linux-lowlatency/precise

Viewing changes to drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c

  • Committer: Package Import Robot
  • Author(s): Alessio Igor Bogani
  • Date: 2011-10-26 11:13:05 UTC
  • Revision ID: package-import@ubuntu.com-20111026111305-tz023xykf0i6eosh
Tags: upstream-3.2.0
Import upstream version 3.2.0

/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>

#define DRV_VERSION     "1.00"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
#define PCH_GBE_MAR_ENTRIES             16
#define PCH_GBE_SHORT_PKT               64
#define DSC_INIT16                      0xC000
#define PCH_GBE_DMA_ALIGN               0
#define PCH_GBE_DMA_PADDING             2
#define PCH_GBE_WATCHDOG_PERIOD         (1 * HZ)        /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT       256
#define PCH_GBE_PCI_BAR                 1
#define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM                      0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013

/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802

#define PCH_GBE_TX_WEIGHT         64
#define PCH_GBE_RX_WEIGHT         64
#define PCH_GBE_RX_BUFFER_WRITE   16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
        PCH_GBE_CHIP_TYPE_INTERNAL | \
        PCH_GBE_RGMII_MODE_RGMII     \
        )

/* Ethertype field values */
#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
#define PCH_GBE_FRAME_SIZE_2048         2048
#define PCH_GBE_FRAME_SIZE_4096         4096
#define PCH_GBE_FRAME_SIZE_8192         8192

#define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
        (R)->next_to_clean - (R)->next_to_use - 1)

/* Pause packet value */
#define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF

#define PCH_GBE_ETH_ALEN            6

/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 *   o RXT0   = Receiver Timer Interrupt (ring 0)
 *   o TXDW   = Transmit Descriptor Written Back
 *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
 *   o RXSEQ  = Receive Sequence Error
 *   o LSC    = Link Status Change
 */
#define PCH_GBE_INT_ENABLE_MASK ( \
        PCH_GBE_INT_RX_DMA_CMPLT |    \
        PCH_GBE_INT_RX_DSC_EMP   |    \
        PCH_GBE_INT_RX_FIFO_ERR  |    \
        PCH_GBE_INT_WOL_DET      |    \
        PCH_GBE_INT_TX_CMPLT          \
        )

#define PCH_GBE_INT_DISABLE_ALL         0

static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);

inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
        iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:             Pointer to the HW structure
 * Returns
 *      0:                      Successful.
 */
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
        u32  adr1a, adr1b;

        adr1a = ioread32(&hw->reg->mac_adr[0].high);
        adr1b = ioread32(&hw->reg->mac_adr[0].low);

        hw->mac.addr[0] = (u8)(adr1a & 0xFF);
        hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
        hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
        hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
        hw->mac.addr[4] = (u8)(adr1b & 0xFF);
        hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

        pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
        return 0;
}

/**
 * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:        Pointer of register
 * @busy:       Busy bit
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
        u32 tmp;
        /* wait busy */
        tmp = 1000;
        while ((ioread32(reg) & bit) && --tmp)
                cpu_relax();
        if (!tmp)
                pr_err("Error: busy bit is not cleared\n");
}

/**
 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
 * @reg:        Pointer of register
 * @busy:       Busy bit
 */
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
        u32 tmp;
        int ret = -1;
        /* wait busy */
        tmp = 20;
        while ((ioread32(reg) & bit) && --tmp)
                udelay(5);
        if (!tmp)
                pr_err("Error: busy bit is not cleared\n");
        else
                ret = 0;
        return ret;
}

/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:     Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
{
        u32 mar_low, mar_high, adrmask;

        pr_debug("index : 0x%x\n", index);

        /*
         * HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
        /* Stop the MAC Address of index. */
        adrmask = ioread32(&hw->reg->ADDR_MASK);
        iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
        /* wait busy */
        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
        /* Set the MAC address to the MAC address 1A/1B register */
        iowrite32(mar_high, &hw->reg->mac_adr[index].high);
        iowrite32(mar_low, &hw->reg->mac_adr[index].low);
        /* Start the MAC address of index */
        iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
        /* wait busy */
        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
        /* Read the MAC address. and store to the private data */
        pch_gbe_mac_read_mac_addr(hw);
        iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
        iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
        pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
        /* Setup the receive address */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
        return;
}

static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
        /* Read the MAC address. and store to the private data */
        pch_gbe_mac_read_mac_addr(hw);
        iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
        pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
        /* Setup the MAC address */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
        return;
}

/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw: Pointer to the HW structure
 * @mar_count: Receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
        u32 i;

        /* Setup the receive address */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

        /* Zero out the other receive addresses */
        for (i = 1; i < mar_count; i++) {
                iowrite32(0, &hw->reg->mac_adr[i].high);
                iowrite32(0, &hw->reg->mac_adr[i].low);
        }
        iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
        /* wait busy */
        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}


/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:             Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
                                            u8 *mc_addr_list, u32 mc_addr_count,
                                            u32 mar_used_count, u32 mar_total_num)
{
        u32 i, adrmask;

        /* Load the first set of multicast addresses into the exact
         * filters (RAR).  If there are not enough to fill the RAR
         * array, clear the filters.
         */
        for (i = mar_used_count; i < mar_total_num; i++) {
                if (mc_addr_count) {
                        pch_gbe_mac_mar_set(hw, mc_addr_list, i);
                        mc_addr_count--;
                        mc_addr_list += PCH_GBE_ETH_ALEN;
                } else {
                        /* Clear MAC address mask */
                        adrmask = ioread32(&hw->reg->ADDR_MASK);
                        iowrite32((adrmask | (0x0001 << i)),
                                        &hw->reg->ADDR_MASK);
                        /* wait busy */
                        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
                        /* Clear MAC address */
                        iowrite32(0, &hw->reg->mac_adr[i].high);
                        iowrite32(0, &hw->reg->mac_adr[i].low);
                }
        }
}

/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:             Pointer to the HW structure
 * Returns
 *      0:                      Successful.
 *      Negative value:         Failed.
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
        struct pch_gbe_mac_info *mac = &hw->mac;
        u32 rx_fctrl;

        pr_debug("mac->fc = %u\n", mac->fc);

        rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

        switch (mac->fc) {
        case PCH_GBE_FC_NONE:
                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
                mac->tx_fc_enable = false;
                break;
        case PCH_GBE_FC_RX_PAUSE:
                rx_fctrl |= PCH_GBE_FL_CTRL_EN;
                mac->tx_fc_enable = false;
                break;
        case PCH_GBE_FC_TX_PAUSE:
                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
                mac->tx_fc_enable = true;
                break;
        case PCH_GBE_FC_FULL:
                rx_fctrl |= PCH_GBE_FL_CTRL_EN;
                mac->tx_fc_enable = true;
                break;
        default:
                pr_err("Flow control param set incorrectly\n");
                return -EINVAL;
        }
        if (mac->link_duplex == DUPLEX_HALF)
                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
        iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
        pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
                 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
        return 0;
}

/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
        u32 addr_mask;

        pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
                 wu_evt, ioread32(&hw->reg->ADDR_MASK));

        if (wu_evt) {
                /* Set Wake-On-Lan address mask */
                addr_mask = ioread32(&hw->reg->ADDR_MASK);
                iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
                /* wait busy */
                pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
                iowrite32(0, &hw->reg->WOL_ST);
                iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
                iowrite32(0x02, &hw->reg->TCPIP_ACC);
                iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
        } else {
                iowrite32(0, &hw->reg->WOL_CTRL);
                iowrite32(0, &hw->reg->WOL_ST);
        }
        return;
}

/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation. (Write or Read)
 * @reg:  Access register of PHY
 * @data: Write data.
 *
 * Returns: Read data.
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
                        u16 data)
{
        u32 data_out = 0;
        unsigned int i;
        unsigned long flags;

        spin_lock_irqsave(&hw->miim_lock, flags);

        for (i = 100; i; --i) {
                if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
                        break;
                udelay(20);
        }
        if (i == 0) {
                pr_err("pch-gbe.miim won't go Ready\n");
                spin_unlock_irqrestore(&hw->miim_lock, flags);
                return 0;       /* No way to indicate timeout error */
        }
        iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
                  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
                  dir | data), &hw->reg->MIIM);
        for (i = 0; i < 100; i++) {
                udelay(20);
                data_out = ioread32(&hw->reg->MIIM);
                if ((data_out & PCH_GBE_MIIM_OPER_READY))
                        break;
        }
        spin_unlock_irqrestore(&hw->miim_lock, flags);

        pr_debug("PHY %s: reg=%d, data=0x%04X\n",
                 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
                 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
        return (u16) data_out;
}

/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:   Pointer to the HW structure
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
        unsigned long tmp2, tmp3;

        /* Set Pause packet */
        tmp2 = hw->mac.addr[1];
        tmp2 = (tmp2 << 8) | hw->mac.addr[0];
        tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

        tmp3 = hw->mac.addr[5];
        tmp3 = (tmp3 << 8) | hw->mac.addr[4];
        tmp3 = (tmp3 << 8) | hw->mac.addr[3];
        tmp3 = (tmp3 << 8) | hw->mac.addr[2];

        iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
        iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
        iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
        iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
        iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

        /* Transmit Pause Packet */
        iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

        pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
                 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
                 ioread32(&hw->reg->PAUSE_PKT5));

        return;
}


/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns
 *      0:      Successfully
 *      Negative value: Failed
 */
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
        int size;

        size = (int)sizeof(struct pch_gbe_tx_ring);
        adapter->tx_ring = kzalloc(size, GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
        size = (int)sizeof(struct pch_gbe_rx_ring);
        adapter->rx_ring = kzalloc(size, GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }
        return 0;
}

/**
 * pch_gbe_init_stats - Initialize status
 * @adapter:  Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
        memset(&adapter->stats, 0, sizeof(adapter->stats));
        return;
}

/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns
 *      0:      Successfully
 *      Negative value: Failed
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 addr;
        u16 bmcr, stat;

        /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
        for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
                adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
                bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
                stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
                stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
                if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
                        break;
        }
        adapter->hw.phy.addr = adapter->mii.phy_id;
        pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
        if (addr == 32)
                return -EAGAIN;
        /* Select the phy and isolate the rest */
        for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
                if (addr != adapter->mii.phy_id) {
                        pch_gbe_mdio_write(netdev, addr, MII_BMCR,
                                           BMCR_ISOLATE);
                } else {
                        bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
                        pch_gbe_mdio_write(netdev, addr, MII_BMCR,
                                           bmcr & ~BMCR_ISOLATE);
                }
        }

        /* MII setup */
        adapter->mii.phy_id_mask = 0x1F;
        adapter->mii.reg_num_mask = 0x1F;
        adapter->mii.dev = adapter->netdev;
        adapter->mii.mdio_read = pch_gbe_mdio_read;
        adapter->mii.mdio_write = pch_gbe_mdio_write;
        adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
        return 0;
}

/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * Returns
 *      0:      Successfully
 *      Negative value: Failed
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_hw *hw = &adapter->hw;

        return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
                                     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID (not used)
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
                               int addr, int reg, int data)
{
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_hw *hw = &adapter->hw;

        pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
        struct pch_gbe_adapter *adapter;
        adapter = container_of(work, struct pch_gbe_adapter, reset_task);

        rtnl_lock();
        pch_gbe_reinit_locked(adapter);
        rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
        pch_gbe_down(adapter);
        pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
        pch_gbe_mac_reset_hw(&adapter->hw);
        /* Setup the receive address. */
        pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
        if (pch_gbe_hal_init_hw(&adapter->hw))
                pr_err("Hardware Error\n");
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter:  Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        free_irq(adapter->pdev->irq, netdev);
        if (adapter->have_msi) {
                pci_disable_msi(adapter->pdev);
                pr_debug("call pci_disable_msi\n");
        }
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;

        atomic_inc(&adapter->irq_sem);
        iowrite32(0, &hw->reg->INT_EN);
        ioread32(&hw->reg->INT_ST);
        synchronize_irq(adapter->pdev->irq);

        pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;

        if (likely(atomic_dec_and_test(&adapter->irq_sem)))
                iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
        ioread32(&hw->reg->INT_ST);
        pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}



/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 tx_mode, tcpip;

        tx_mode = PCH_GBE_TM_LONG_PKT |
                PCH_GBE_TM_ST_AND_FD |
                PCH_GBE_TM_SHORT_PKT |
                PCH_GBE_TM_TH_TX_STRT_8 |
                PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

        iowrite32(tx_mode, &hw->reg->TX_MODE);

        tcpip = ioread32(&hw->reg->TCPIP_ACC);
        tcpip |= PCH_GBE_TX_TCPIPACC_EN;
        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
        return;
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 tdba, tdlen, dctrl;

        pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
                 (unsigned long long)adapter->tx_ring->dma,
                 adapter->tx_ring->size);

        /* Setup the HW Tx Head and Tail descriptor pointers */
        tdba = adapter->tx_ring->dma;
        tdlen = adapter->tx_ring->size - 0x10;
        iowrite32(tdba, &hw->reg->TX_DSC_BASE);
        iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
        iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

        /* Enables Transmission DMA */
        dctrl = ioread32(&hw->reg->DMA_CTRL);
        dctrl |= PCH_GBE_TX_DMA_EN;
        iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 rx_mode, tcpip;

        rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
        PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

        iowrite32(rx_mode, &hw->reg->RX_MODE);

        tcpip = ioread32(&hw->reg->TCPIP_ACC);

        tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
        tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
        return;
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 rdba, rdlen, rctl, rxdma;

        pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
                 (unsigned long long)adapter->rx_ring->dma,
                 adapter->rx_ring->size);

        pch_gbe_mac_force_mac_fc(hw);

        /* Disables Receive MAC */
        rctl = ioread32(&hw->reg->MAC_RX_EN);
        iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

        /* Disables Receive DMA */
        rxdma = ioread32(&hw->reg->DMA_CTRL);
        rxdma &= ~PCH_GBE_RX_DMA_EN;
        iowrite32(rxdma, &hw->reg->DMA_CTRL);

        pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
                 ioread32(&hw->reg->MAC_RX_EN),
                 ioread32(&hw->reg->DMA_CTRL));

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        rdba = adapter->rx_ring->dma;
        rdlen = adapter->rx_ring->size - 0x10;
        iowrite32(rdba, &hw->reg->RX_DSC_BASE);
        iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
        iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
        struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
        if (buffer_info->mapped) {
                dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                 buffer_info->length, DMA_TO_DEVICE);
                buffer_info->mapped = false;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
                                        struct pch_gbe_adapter *adapter,
                                        struct pch_gbe_buffer *buffer_info)
{
        if (buffer_info->mapped) {
                dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                 buffer_info->length, DMA_FROM_DEVICE);
                buffer_info->mapped = false;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
                                   struct pch_gbe_tx_ring *tx_ring)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        struct pch_gbe_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
        }
        pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

        size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
        iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
                      struct pch_gbe_rx_ring *rx_ring)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        struct pch_gbe_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
        }
        pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
        size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
        iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}

static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
                                    u16 duplex)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        unsigned long rgmii = 0;

        /* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
        switch (speed) {
        case SPEED_10:
                rgmii = (PCH_GBE_RGMII_RATE_2_5M |
                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
                break;
        case SPEED_100:
                rgmii = (PCH_GBE_RGMII_RATE_25M |
                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
                break;
        case SPEED_1000:
                rgmii = (PCH_GBE_RGMII_RATE_125M |
                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
                break;
        }
        iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else   /* GMII */
        rgmii = 0;
        iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
                              u16 duplex)
{
        struct net_device *netdev = adapter->netdev;
        struct pch_gbe_hw *hw = &adapter->hw;
        unsigned long mode = 0;

        /* Set the communication mode */
        switch (speed) {
        case SPEED_10:
                mode = PCH_GBE_MODE_MII_ETHER;
                netdev->tx_queue_len = 10;
                break;
        case SPEED_100:
                mode = PCH_GBE_MODE_MII_ETHER;
                netdev->tx_queue_len = 100;
                break;
        case SPEED_1000:
                mode = PCH_GBE_MODE_GMII_ETHER;
                break;
        }
        if (duplex == DUPLEX_FULL)
                mode |= PCH_GBE_MODE_FULL_DUPLEX;
        else
                mode |= PCH_GBE_MODE_HALF_DUPLEX;
        iowrite32(mode, &hw->reg->MODE);
}

/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure
 */
static void pch_gbe_watchdog(unsigned long data)
{
        struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
        struct net_device *netdev = adapter->netdev;
        struct pch_gbe_hw *hw = &adapter->hw;

        pr_debug("right now = %ld\n", jiffies);

        pch_gbe_update_stats(adapter);
        if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
                struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
                netdev->tx_queue_len = adapter->tx_queue_len;
                /* mii library handles link maintenance tasks */
                if (mii_ethtool_gset(&adapter->mii, &cmd)) {
                        pr_err("ethtool get setting Error\n");
                        mod_timer(&adapter->watchdog_timer,
                                  round_jiffies(jiffies +
                                                PCH_GBE_WATCHDOG_PERIOD));
                        return;
                }
                hw->mac.link_speed = ethtool_cmd_speed(&cmd);
                hw->mac.link_duplex = cmd.duplex;
                /* Set the RGMII control. */
                pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
                                                hw->mac.link_duplex);
                /* Set the communication mode */
                pch_gbe_set_mode(adapter, hw->mac.link_speed,
                                 hw->mac.link_duplex);
                netdev_dbg(netdev,
                           "Link is Up %d Mbps %s-Duplex\n",
                           hw->mac.link_speed,
                           cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
                netif_carrier_on(netdev);
                netif_wake_queue(netdev);
        } else if ((!mii_link_ok(&adapter->mii)) &&
                   (netif_carrier_ok(netdev))) {
                netdev_dbg(netdev, "NIC Link is Down\n");
                hw->mac.link_speed = SPEED_10;
                hw->mac.link_duplex = DUPLEX_HALF;
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
        }
        mod_timer(&adapter->watchdog_timer,
                  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                              struct pch_gbe_tx_ring *tx_ring,
                              struct sk_buff *skb)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        struct pch_gbe_tx_desc *tx_desc;
        struct pch_gbe_buffer *buffer_info;
        struct sk_buff *tmp_skb;
        unsigned int frame_ctrl;
        unsigned int ring_num;
        unsigned long flags;

        /*-- Set frame control --*/
        frame_ctrl = 0;
        if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
                frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
        if (skb->ip_summed == CHECKSUM_NONE)
                frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

        /* Performs checksum processing */
        /*
         * This is because the hardware accelerator does not support
         * checksum offload when the data size is less than 64 bytes.
         */
        if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
                frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
                              PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        unsigned int offset;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
                        offset = skb_transport_offset(skb);
                        if (iph->protocol == IPPROTO_TCP) {
                                skb->csum = 0;
                                tcp_hdr(skb)->check = 0;
                                skb->csum = skb_checksum(skb, offset,
                                                         skb->len - offset, 0);
                                tcp_hdr(skb)->check =
                                        csum_tcpudp_magic(iph->saddr,
                                                          iph->daddr,
                                                          skb->len - offset,
                                                          IPPROTO_TCP,
                                                          skb->csum);
                        } else if (iph->protocol == IPPROTO_UDP) {
                                skb->csum = 0;
                                udp_hdr(skb)->check = 0;
                                skb->csum =
                                        skb_checksum(skb, offset,
                                                     skb->len - offset, 0);
                                udp_hdr(skb)->check =
                                        csum_tcpudp_magic(iph->saddr,
                                                          iph->daddr,
                                                          skb->len - offset,
                                                          IPPROTO_UDP,
                                                          skb->csum);
                        }
                }
        }
        spin_lock_irqsave(&tx_ring->tx_lock, flags);
        ring_num = tx_ring->next_to_use;
        if (unlikely((ring_num + 1) == tx_ring->count))
                tx_ring->next_to_use = 0;
        else
                tx_ring->next_to_use = ring_num + 1;

        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        buffer_info = &tx_ring->buffer_info[ring_num];
        tmp_skb = buffer_info->skb;

        /* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
        memcpy(tmp_skb->data, skb->data, ETH_HLEN);
        tmp_skb->data[ETH_HLEN] = 0x00;
        tmp_skb->data[ETH_HLEN + 1] = 0x00;
        tmp_skb->len = skb->len;
        memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
               (skb->len - ETH_HLEN));
        /*-- Set Buffer information --*/
        buffer_info->length = tmp_skb->len;
        buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
                                          buffer_info->length,
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
                pr_err("TX DMA map failed\n");
                buffer_info->dma = 0;
                buffer_info->time_stamp = 0;
                tx_ring->next_to_use = ring_num;
                return;
        }
        buffer_info->mapped = true;
        buffer_info->time_stamp = jiffies;

        /*-- Set Tx descriptor --*/
        tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
        tx_desc->buffer_addr = (buffer_info->dma);
        tx_desc->length = (tmp_skb->len);
        tx_desc->tx_words_eob = ((tmp_skb->len + 3));
        tx_desc->tx_frame_ctrl = (frame_ctrl);
        tx_desc->gbec_status = (DSC_INIT16);

        if (unlikely(++ring_num == tx_ring->count))
                ring_num = 0;

        /* Update software pointer of TX descriptor */
        iowrite32(tx_ring->dma +
                  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
                  &hw->reg->TX_DSC_SW_P);
        dev_kfree_skb_any(skb);
}

/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct pch_gbe_hw_stats *stats = &adapter->stats;
        unsigned long flags;

        /*
         * Prevent stats update while adapter is being reset, or if the pci
         * connection is down.
         */
        if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
                return;

        spin_lock_irqsave(&adapter->stats_lock, flags);

        /* Update device status "adapter->stats" */
        stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
        stats->tx_errors = stats->tx_length_errors +
            stats->tx_aborted_errors +
            stats->tx_carrier_errors + stats->tx_timeout_count;

        /* Update network device status "adapter->net_stats" */
        netdev->stats.rx_packets = stats->rx_packets;
        netdev->stats.rx_bytes = stats->rx_bytes;
        netdev->stats.rx_dropped = stats->rx_dropped;
        netdev->stats.tx_packets = stats->tx_packets;
        netdev->stats.tx_bytes = stats->tx_bytes;
        netdev->stats.tx_dropped = stats->tx_dropped;
        /* Fill out the OS statistics structure */
        netdev->stats.multicast = stats->multicast;
        netdev->stats.collisions = stats->collisions;
        /* Rx Errors */
        netdev->stats.rx_errors = stats->rx_errors;
        netdev->stats.rx_crc_errors = stats->rx_crc_errors;
        netdev->stats.rx_frame_errors = stats->rx_frame_errors;
        /* Tx Errors */
        netdev->stats.tx_errors = stats->tx_errors;
        netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
        netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

        spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
{
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 rxdma;
        u16 value;
        int ret;

        /* Disable Receive DMA */
        rxdma = ioread32(&hw->reg->DMA_CTRL);
        rxdma &= ~PCH_GBE_RX_DMA_EN;
        iowrite32(rxdma, &hw->reg->DMA_CTRL);
        /* Wait Rx DMA BUS is IDLE */
        ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
        if (ret) {
                /* Disable Bus master */
                pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
                value &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
                /* Stop Receive */
                pch_gbe_mac_reset_rx(hw);
                /* Enable Bus master */
                value |= PCI_COMMAND_MASTER;
                pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
        } else {
                /* Stop Receive */
                pch_gbe_mac_reset_rx(hw);
        }
}

static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
{
        u32 rxdma;

        /* Enables Receive DMA */
        rxdma = ioread32(&hw->reg->DMA_CTRL);
        rxdma |= PCH_GBE_RX_DMA_EN;
        iowrite32(rxdma, &hw->reg->DMA_CTRL);
        /* Enables Receive */
        iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
        return;
}

/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns
 *      - IRQ_HANDLED:  Our interrupt
 *      - IRQ_NONE:     Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_hw *hw = &adapter->hw;
        u32 int_st;
        u32 int_en;

        /* Check request status */
        int_st = ioread32(&hw->reg->INT_ST);
        int_st = int_st & ioread32(&hw->reg->INT_EN);
        /* When request status is no interruption factor */
        if (unlikely(!int_st))
                return IRQ_NONE;        /* Not our interrupt. End processing. */
        pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
        if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
                adapter->stats.intr_rx_frame_err_count++;
        if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
                if (!adapter->rx_stop_flag) {
                        adapter->stats.intr_rx_fifo_err_count++;
                        pr_debug("Rx fifo over run\n");
                        adapter->rx_stop_flag = true;
                        int_en = ioread32(&hw->reg->INT_EN);
                        iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
                                  &hw->reg->INT_EN);
                        pch_gbe_stop_receive(adapter);
                        int_st |= ioread32(&hw->reg->INT_ST);
                        int_st = int_st & ioread32(&hw->reg->INT_EN);
                }
        if (int_st & PCH_GBE_INT_RX_DMA_ERR)
                adapter->stats.intr_rx_dma_err_count++;
        if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
                adapter->stats.intr_tx_fifo_err_count++;
        if (int_st & PCH_GBE_INT_TX_DMA_ERR)
                adapter->stats.intr_tx_dma_err_count++;
        if (int_st & PCH_GBE_INT_TCPIP_ERR)
                adapter->stats.intr_tcpip_err_count++;
        /* When Rx descriptor is empty  */
        if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
                adapter->stats.intr_rx_dsc_empty_count++;
                pr_debug("Rx descriptor is empty\n");
                int_en = ioread32(&hw->reg->INT_EN);
                iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
                if (hw->mac.tx_fc_enable) {
                        /* Set Pause packet */
                        pch_gbe_mac_set_pause_packet(hw);
                }
        }

        /* When request status is Receive interruption */
        if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
            (adapter->rx_stop_flag == true)) {
                if (likely(napi_schedule_prep(&adapter->napi))) {
                        /* Enable only Rx Descriptor empty */
                        atomic_inc(&adapter->irq_sem);
                        int_en = ioread32(&hw->reg->INT_EN);
                        int_en &=
                            ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
                        iowrite32(int_en, &hw->reg->INT_EN);
                        /* Start polling for NAPI */
                        __napi_schedule(&adapter->napi);
                }
        }
        pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
                 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
        return IRQ_HANDLED;
}

/**
 
1245
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 
1246
 * @adapter:       Board private structure
 
1247
 * @rx_ring:       Rx descriptor ring
 
1248
 * @cleaned_count: Cleaned count
 
1249
 */
 
1250
static void
 
1251
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
 
1252
                         struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
 
1253
{
 
1254
        struct net_device *netdev = adapter->netdev;
 
1255
        struct pci_dev *pdev = adapter->pdev;
 
1256
        struct pch_gbe_hw *hw = &adapter->hw;
 
1257
        struct pch_gbe_rx_desc *rx_desc;
 
1258
        struct pch_gbe_buffer *buffer_info;
 
1259
        struct sk_buff *skb;
 
1260
        unsigned int i;
 
1261
        unsigned int bufsz;
 
1262
 
 
1263
        bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 
1264
        i = rx_ring->next_to_use;
 
1265
 
 
1266
        while ((cleaned_count--)) {
 
1267
                buffer_info = &rx_ring->buffer_info[i];
 
1268
                skb = netdev_alloc_skb(netdev, bufsz);
 
1269
                if (unlikely(!skb)) {
 
1270
                        /* Better luck next round */
 
1271
                        adapter->stats.rx_alloc_buff_failed++;
 
1272
                        break;
 
1273
                }
 
1274
                /* align */
 
1275
                skb_reserve(skb, NET_IP_ALIGN);
 
1276
                buffer_info->skb = skb;
 
1277
 
 
1278
                buffer_info->dma = dma_map_single(&pdev->dev,
 
1279
                                                  buffer_info->rx_buffer,
 
1280
                                                  buffer_info->length,
 
1281
                                                  DMA_FROM_DEVICE);
 
1282
                if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
 
1283
                        dev_kfree_skb(skb);
 
1284
                        buffer_info->skb = NULL;
 
1285
                        buffer_info->dma = 0;
 
1286
                        adapter->stats.rx_alloc_buff_failed++;
 
1287
                        break; /* while !buffer_info->skb */
 
1288
                }
 
1289
                buffer_info->mapped = true;
 
1290
                rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 
1291
                rx_desc->buffer_addr = (buffer_info->dma);
 
1292
                rx_desc->gbec_status = DSC_INIT16;
 
1293
 
 
1294
                pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
 
1295
                         i, (unsigned long long)buffer_info->dma,
 
1296
                         buffer_info->length);
 
1297
 
 
1298
                if (unlikely(++i == rx_ring->count))
 
1299
                        i = 0;
 
1300
        }
 
1301
        if (likely(rx_ring->next_to_use != i)) {
 
1302
                rx_ring->next_to_use = i;
 
1303
                if (unlikely(i-- == 0))
 
1304
                        i = (rx_ring->count - 1);
 
1305
                iowrite32(rx_ring->dma +
 
1306
                          (int)sizeof(struct pch_gbe_rx_desc) * i,
 
1307
                          &hw->reg->RX_DSC_SW_P);
 
1308
        }
 
1309
        return;
 
1310
}
 
1311
 
 
1312
static int
 
1313
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 
1314
                         struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
 
1315
{
 
1316
        struct pci_dev *pdev = adapter->pdev;
 
1317
        struct pch_gbe_buffer *buffer_info;
 
1318
        unsigned int i;
 
1319
        unsigned int bufsz;
 
1320
        unsigned int size;
 
1321
 
 
1322
        bufsz = adapter->rx_buffer_len;
 
1323
 
 
1324
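        /*
         * All receive buffers are carved out of one coherent DMA allocation:
         * each buffer_info->rx_buffer points into this pool at a bufsz
         * stride, with PCH_GBE_RESERVE_MEMORY of extra space on top.
         */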
        size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 
1325
        rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
 
1326
                                                &rx_ring->rx_buff_pool_logic,
 
1327
                                                GFP_KERNEL);
 
1328
        if (!rx_ring->rx_buff_pool) {
 
1329
                pr_err("Unable to allocate memory for the receive poll buffer\n");
 
1330
                return -ENOMEM;
 
1331
        }
 
1332
        memset(rx_ring->rx_buff_pool, 0, size);
 
1333
        rx_ring->rx_buff_pool_size = size;
 
1334
        for (i = 0; i < rx_ring->count; i++) {
 
1335
                buffer_info = &rx_ring->buffer_info[i];
 
1336
                buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
 
1337
                buffer_info->length = bufsz;
 
1338
        }
 
1339
        return 0;
 
1340
}
 
1341
 
 
1342
/**
 
1343
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 
1344
 * @adapter:   Board private structure
 
1345
 * @tx_ring:   Tx descriptor ring
 
1346
 */
 
1347
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
 
1348
                                        struct pch_gbe_tx_ring *tx_ring)
 
1349
{
 
1350
        struct pch_gbe_buffer *buffer_info;
 
1351
        struct sk_buff *skb;
 
1352
        unsigned int i;
 
1353
        unsigned int bufsz;
 
1354
        struct pch_gbe_tx_desc *tx_desc;
 
1355
 
 
1356
        bufsz =
 
1357
            adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
 
1358
 
 
1359
        for (i = 0; i < tx_ring->count; i++) {
 
1360
                buffer_info = &tx_ring->buffer_info[i];
 
1361
                skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
1362
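                /* Note: the allocation is assumed to succeed here; skb is not
                 * checked for NULL before it is reserved and stored. */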
                skb_reserve(skb, PCH_GBE_DMA_ALIGN);
 
1363
                buffer_info->skb = skb;
 
1364
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
1365
                tx_desc->gbec_status = (DSC_INIT16);
 
1366
        }
 
1367
        return;
 
1368
}
 
1369
 
 
1370
/**
 
1371
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 
1372
 * @adapter:   Board private structure
 
1373
 * @tx_ring:   Tx descriptor ring
 
1374
 * Returns
 
1375
 *      true:  Cleaned the descriptor
 
1376
 *      false: Did not clean the descriptor
 
1377
 */
 
1378
static bool
 
1379
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 
1380
                 struct pch_gbe_tx_ring *tx_ring)
 
1381
{
 
1382
        struct pch_gbe_tx_desc *tx_desc;
 
1383
        struct pch_gbe_buffer *buffer_info;
 
1384
        struct sk_buff *skb;
 
1385
        unsigned int i;
 
1386
        unsigned int cleaned_count = 0;
 
1387
        bool cleaned = true;
 
1388
 
 
1389
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 
1390
 
 
1391
        i = tx_ring->next_to_clean;
 
1392
        tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
1393
        pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
 
1394
                 tx_desc->gbec_status, tx_desc->dma_status);
 
1395
 
 
1396
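        /*
         * A descriptor whose gbec_status still carries DSC_INIT16 has not
         * been written back by the hardware yet; the loop below therefore
         * only walks descriptors whose transmission has completed.
         */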
        while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
 
1397
                pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
 
1398
                buffer_info = &tx_ring->buffer_info[i];
 
1399
                skb = buffer_info->skb;
 
1400
 
 
1401
                if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
 
1402
                        adapter->stats.tx_aborted_errors++;
 
1403
                        pr_err("Transfer Abort Error\n");
 
1404
                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
 
1405
                          ) {
 
1406
                        adapter->stats.tx_carrier_errors++;
 
1407
                        pr_err("Transfer Carrier Sense Error\n");
 
1408
                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
 
1409
                          ) {
 
1410
                        adapter->stats.tx_aborted_errors++;
 
1411
                        pr_err("Transfer Collision Abort Error\n");
 
1412
                } else if ((tx_desc->gbec_status &
 
1413
                            (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
 
1414
                             PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
 
1415
                        adapter->stats.collisions++;
 
1416
                        adapter->stats.tx_packets++;
 
1417
                        adapter->stats.tx_bytes += skb->len;
 
1418
                        pr_debug("Transfer Collision\n");
 
1419
                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
 
1420
                          ) {
 
1421
                        adapter->stats.tx_packets++;
 
1422
                        adapter->stats.tx_bytes += skb->len;
 
1423
                }
 
1424
                if (buffer_info->mapped) {
 
1425
                        pr_debug("unmap buffer_info->dma : %d\n", i);
 
1426
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 
1427
                                         buffer_info->length, DMA_TO_DEVICE);
 
1428
                        buffer_info->mapped = false;
 
1429
                }
 
1430
                if (buffer_info->skb) {
 
1431
                        pr_debug("trim buffer_info->skb : %d\n", i);
 
1432
                        skb_trim(buffer_info->skb, 0);
 
1433
                }
 
1434
                tx_desc->gbec_status = DSC_INIT16;
 
1435
                if (unlikely(++i == tx_ring->count))
 
1436
                        i = 0;
 
1437
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
1438
 
 
1439
                /* limit how much is cleaned per call, to avoid an endless transmit cleanup loop */
 
1440
                if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
 
1441
                        cleaned = false;
 
1442
                        break;
 
1443
                }
 
1444
        }
 
1445
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 
1446
                 cleaned_count);
 
1447
        /* Recover from running out of Tx resources in xmit_frame */
 
1448
        if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
 
1449
                netif_wake_queue(adapter->netdev);
 
1450
                adapter->stats.tx_restart_count++;
 
1451
                pr_debug("Tx wake queue\n");
 
1452
        }
 
1453
        spin_lock(&adapter->tx_queue_lock);
 
1454
        tx_ring->next_to_clean = i;
 
1455
        spin_unlock(&adapter->tx_queue_lock);
 
1456
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 
1457
        return cleaned;
 
1458
}
 
1459
 
 
1460
/**
 
1461
 * pch_gbe_clean_rx - Send received data up the network stack; legacy
 
1462
 * @adapter:     Board private structure
 
1463
 * @rx_ring:     Rx descriptor ring
 
1464
 * @work_done:   Completed count
 
1465
 * @work_to_do:  Request count
 
1466
 * Returns
 
1467
 *      true:  Cleaned the descriptor
 
1468
 *      false: Did not clean the descriptor
 
1469
 */
 
1470
static bool
 
1471
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 
1472
                 struct pch_gbe_rx_ring *rx_ring,
 
1473
                 int *work_done, int work_to_do)
 
1474
{
 
1475
        struct net_device *netdev = adapter->netdev;
 
1476
        struct pci_dev *pdev = adapter->pdev;
 
1477
        struct pch_gbe_buffer *buffer_info;
 
1478
        struct pch_gbe_rx_desc *rx_desc;
 
1479
        u32 length;
 
1480
        unsigned int i;
 
1481
        unsigned int cleaned_count = 0;
 
1482
        bool cleaned = false;
 
1483
        struct sk_buff *skb;
 
1484
        u8 dma_status;
 
1485
        u16 gbec_status;
 
1486
        u32 tcp_ip_status;
 
1487
 
 
1488
        i = rx_ring->next_to_clean;
 
1489
 
 
1490
        while (*work_done < work_to_do) {
 
1491
                /* Check Rx descriptor status */
 
1492
                rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 
1493
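                /* A gbec_status equal to DSC_INIT16 means the hardware has not
                 * written this descriptor back yet, so there is nothing more
                 * to clean. */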
                if (rx_desc->gbec_status == DSC_INIT16)
 
1494
                        break;
 
1495
                cleaned = true;
 
1496
                cleaned_count++;
 
1497
 
 
1498
                dma_status = rx_desc->dma_status;
 
1499
                gbec_status = rx_desc->gbec_status;
 
1500
                tcp_ip_status = rx_desc->tcp_ip_status;
 
1501
                rx_desc->gbec_status = DSC_INIT16;
 
1502
                buffer_info = &rx_ring->buffer_info[i];
 
1503
                skb = buffer_info->skb;
 
1504
                buffer_info->skb = NULL;
 
1505
 
 
1506
                /* unmap dma */
 
1507
                dma_unmap_single(&pdev->dev, buffer_info->dma,
 
1508
                                   buffer_info->length, DMA_FROM_DEVICE);
 
1509
                buffer_info->mapped = false;
 
1510
 
 
1511
                pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
 
1512
                         "TCP:0x%08x]  BufInf = 0x%p\n",
 
1513
                         i, dma_status, gbec_status, tcp_ip_status,
 
1514
                         buffer_info);
 
1515
                /* Error check */
 
1516
                if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
 
1517
                        adapter->stats.rx_frame_errors++;
 
1518
                        pr_err("Receive Not Octal Error\n");
 
1519
                } else if (unlikely(gbec_status &
 
1520
                                PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
 
1521
                        adapter->stats.rx_frame_errors++;
 
1522
                        pr_err("Receive Nibble Error\n");
 
1523
                } else if (unlikely(gbec_status &
 
1524
                                PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
 
1525
                        adapter->stats.rx_crc_errors++;
 
1526
                        pr_err("Receive CRC Error\n");
 
1527
                } else {
 
1528
                        /* get receive length */
 
1529
                        /* the hardware length count includes a 3-byte offset and the FCS; subtract both */
 
1530
                        length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
 
1531
                        if (rx_desc->rx_words_eob & 0x02)
 
1532
                                length = length - 4;
 
1533
                        /*
 
1534
                         * buffer_info->rx_buffer: [Header:14][payload]
 
1535
                         * skb->data: [Reserve:2][Header:14][payload]
 
1536
                         */
 
1537
                        memcpy(skb->data, buffer_info->rx_buffer, length);
 
1538
 
 
1539
                        /* update status of driver */
 
1540
                        adapter->stats.rx_bytes += length;
 
1541
                        adapter->stats.rx_packets++;
 
1542
                        if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
 
1543
                                adapter->stats.multicast++;
 
1544
                        /* Write skb metadata */
 
1545
                        skb_put(skb, length);
 
1546
                        skb->protocol = eth_type_trans(skb, netdev);
 
1547
                        if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
 
1548
                                skb->ip_summed = CHECKSUM_NONE;
 
1549
                        else
 
1550
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
1551
 
 
1552
                        napi_gro_receive(&adapter->napi, skb);
 
1553
                        (*work_done)++;
 
1554
                        pr_debug("Receive skb->ip_summed: %d length: %d\n",
 
1555
                                 skb->ip_summed, length);
 
1556
                }
 
1557
                /* return some buffers to hardware, one at a time is too slow */
 
1558
                if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
 
1559
                        pch_gbe_alloc_rx_buffers(adapter, rx_ring,
 
1560
                                                 cleaned_count);
 
1561
                        cleaned_count = 0;
 
1562
                }
 
1563
                if (++i == rx_ring->count)
 
1564
                        i = 0;
 
1565
        }
 
1566
        rx_ring->next_to_clean = i;
 
1567
        if (cleaned_count)
 
1568
                pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
1569
        return cleaned;
 
1570
}
 
1571
 
 
1572
/**
 
1573
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 
1574
 * @adapter:  Board private structure
 
1575
 * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 
1576
 * Returns
 
1577
 *      0:              Successfully
 
1578
 *      Negative value: Failed
 
1579
 */
 
1580
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
1581
                                struct pch_gbe_tx_ring *tx_ring)
 
1582
{
 
1583
        struct pci_dev *pdev = adapter->pdev;
 
1584
        struct pch_gbe_tx_desc *tx_desc;
 
1585
        int size;
 
1586
        int desNo;
 
1587
 
 
1588
        size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 
1589
        tx_ring->buffer_info = vzalloc(size);
 
1590
        if (!tx_ring->buffer_info) {
 
1591
                pr_err("Unable to allocate memory for the buffer information\n");
 
1592
                return -ENOMEM;
 
1593
        }
 
1594
 
 
1595
        tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
1596
 
 
1597
        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
 
1598
                                           &tx_ring->dma, GFP_KERNEL);
 
1599
        if (!tx_ring->desc) {
 
1600
                vfree(tx_ring->buffer_info);
 
1601
                pr_err("Unable to allocate memory for the transmit descriptor ring\n");
 
1602
                return -ENOMEM;
 
1603
        }
 
1604
        memset(tx_ring->desc, 0, tx_ring->size);
 
1605
 
 
1606
        tx_ring->next_to_use = 0;
 
1607
        tx_ring->next_to_clean = 0;
 
1608
        spin_lock_init(&tx_ring->tx_lock);
 
1609
 
 
1610
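        /* Tag every descriptor with DSC_INIT16 so that pch_gbe_clean_tx()
         * ignores it until the hardware reports a completion status. */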
        for (desNo = 0; desNo < tx_ring->count; desNo++) {
 
1611
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
 
1612
                tx_desc->gbec_status = DSC_INIT16;
 
1613
        }
 
1614
        pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
 
1615
                 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 
1616
                 tx_ring->desc, (unsigned long long)tx_ring->dma,
 
1617
                 tx_ring->next_to_clean, tx_ring->next_to_use);
 
1618
        return 0;
 
1619
}
 
1620
 
 
1621
/**
 
1622
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 
1623
 * @adapter:  Board private structure
 
1624
 * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 
1625
 * Returns
 
1626
 *      0:              Successfully
 
1627
 *      Negative value: Failed
 
1628
 */
 
1629
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 
1630
                                struct pch_gbe_rx_ring *rx_ring)
 
1631
{
 
1632
        struct pci_dev *pdev = adapter->pdev;
 
1633
        struct pch_gbe_rx_desc *rx_desc;
 
1634
        int size;
 
1635
        int desNo;
 
1636
 
 
1637
        size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 
1638
        rx_ring->buffer_info = vzalloc(size);
 
1639
        if (!rx_ring->buffer_info) {
 
1640
                pr_err("Unable to allocate memory for the receive descriptor ring\n");
 
1641
                return -ENOMEM;
 
1642
        }
 
1643
        rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 
1644
        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
 
1645
                                           &rx_ring->dma, GFP_KERNEL);
 
1646
 
 
1647
        if (!rx_ring->desc) {
 
1648
                pr_err("Unable to allocate memory for the receive descriptor ring\n");
 
1649
                vfree(rx_ring->buffer_info);
 
1650
                return -ENOMEM;
 
1651
        }
 
1652
        memset(rx_ring->desc, 0, rx_ring->size);
 
1653
        rx_ring->next_to_clean = 0;
 
1654
        rx_ring->next_to_use = 0;
 
1655
        for (desNo = 0; desNo < rx_ring->count; desNo++) {
 
1656
                rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
 
1657
                rx_desc->gbec_status = DSC_INIT16;
 
1658
        }
 
1659
        pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
 
1660
                 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 
1661
                 rx_ring->desc, (unsigned long long)rx_ring->dma,
 
1662
                 rx_ring->next_to_clean, rx_ring->next_to_use);
 
1663
        return 0;
 
1664
}
 
1665
 
 
1666
/**
 
1667
 * pch_gbe_free_tx_resources - Free Tx Resources
 
1668
 * @adapter:  Board private structure
 
1669
 * @tx_ring:  Tx descriptor ring for a specific queue
 
1670
 */
 
1671
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
 
1672
                                struct pch_gbe_tx_ring *tx_ring)
 
1673
{
 
1674
        struct pci_dev *pdev = adapter->pdev;
 
1675
 
 
1676
        pch_gbe_clean_tx_ring(adapter, tx_ring);
 
1677
        vfree(tx_ring->buffer_info);
 
1678
        tx_ring->buffer_info = NULL;
 
1679
        pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
 
1680
        tx_ring->desc = NULL;
 
1681
}
 
1682
 
 
1683
/**
 
1684
 * pch_gbe_free_rx_resources - Free Rx Resources
 
1685
 * @adapter:  Board private structure
 
1686
 * @rx_ring:  Ring to clean the resources from
 
1687
 */
 
1688
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
 
1689
                                struct pch_gbe_rx_ring *rx_ring)
 
1690
{
 
1691
        struct pci_dev *pdev = adapter->pdev;
 
1692
 
 
1693
        pch_gbe_clean_rx_ring(adapter, rx_ring);
 
1694
        vfree(rx_ring->buffer_info);
 
1695
        rx_ring->buffer_info = NULL;
 
1696
        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
1697
        rx_ring->desc = NULL;
 
1698
}
 
1699
 
 
1700
/**
 
1701
 * pch_gbe_request_irq - Allocate an interrupt line
 
1702
 * @adapter:  Board private structure
 
1703
 * Returns
 
1704
 *      0:              Successfully
 
1705
 *      Negative value: Failed
 
1706
 */
 
1707
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
 
1708
{
 
1709
        struct net_device *netdev = adapter->netdev;
 
1710
        int err;
 
1711
        int flags;
 
1712
 
 
1713
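        /*
         * Try MSI first and fall back to the shared legacy interrupt line if
         * it cannot be enabled; with MSI the vector is exclusive, so the
         * IRQF_SHARED flag is dropped.
         */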
        flags = IRQF_SHARED;
 
1714
        adapter->have_msi = false;
 
1715
        err = pci_enable_msi(adapter->pdev);
 
1716
        pr_debug("call pci_enable_msi\n");
 
1717
        if (err) {
 
1718
                pr_debug("call pci_enable_msi - Error: %d\n", err);
 
1719
        } else {
 
1720
                flags = 0;
 
1721
                adapter->have_msi = true;
 
1722
        }
 
1723
        err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
 
1724
                          flags, netdev->name, netdev);
 
1725
        if (err)
 
1726
                pr_err("Unable to allocate interrupt Error: %d\n", err);
 
1727
        pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
 
1728
                 adapter->have_msi, flags, err);
 
1729
        return err;
 
1730
}
 
1731
 
 
1732
 
 
1733
static void pch_gbe_set_multi(struct net_device *netdev);
 
1734
/**
 
1735
 * pch_gbe_up - Bring the GbE network device up
 
1736
 * @adapter:  Board private structure
 
1737
 * Returns
 
1738
 *      0:              Successfully
 
1739
 *      Negative value: Failed
 
1740
 */
 
1741
int pch_gbe_up(struct pch_gbe_adapter *adapter)
 
1742
{
 
1743
        struct net_device *netdev = adapter->netdev;
 
1744
        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 
1745
        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 
1746
        int err;
 
1747
 
 
1748
        /* hardware has been reset, we need to reload some things */
 
1749
        pch_gbe_set_multi(netdev);
 
1750
 
 
1751
        pch_gbe_setup_tctl(adapter);
 
1752
        pch_gbe_configure_tx(adapter);
 
1753
        pch_gbe_setup_rctl(adapter);
 
1754
        pch_gbe_configure_rx(adapter);
 
1755
 
 
1756
        err = pch_gbe_request_irq(adapter);
 
1757
        if (err) {
 
1758
                pr_err("Error: can't bring device up\n");
 
1759
                return err;
 
1760
        }
 
1761
        err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 
1762
        if (err) {
 
1763
                pr_err("Error: can't bring device up\n");
 
1764
                return err;
 
1765
        }
 
1766
        pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 
1767
        pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 
1768
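        /* Save the stack's tx_queue_len so pch_gbe_down() can restore it */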
        adapter->tx_queue_len = netdev->tx_queue_len;
 
1769
        pch_gbe_start_receive(&adapter->hw);
 
1770
 
 
1771
        mod_timer(&adapter->watchdog_timer, jiffies);
 
1772
 
 
1773
        napi_enable(&adapter->napi);
 
1774
        pch_gbe_irq_enable(adapter);
 
1775
        netif_start_queue(adapter->netdev);
 
1776
 
 
1777
        return 0;
 
1778
}
 
1779
 
 
1780
/**
 
1781
 * pch_gbe_down - Bring the GbE network device down
 
1782
 * @adapter:  Board private structure
 
1783
 */
 
1784
void pch_gbe_down(struct pch_gbe_adapter *adapter)
 
1785
{
 
1786
        struct net_device *netdev = adapter->netdev;
 
1787
        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 
1788
 
 
1789
        /* signal that we're down so the interrupt handler does not
 
1790
         * reschedule our watchdog timer */
 
1791
        napi_disable(&adapter->napi);
 
1792
        atomic_set(&adapter->irq_sem, 0);
 
1793
 
 
1794
        pch_gbe_irq_disable(adapter);
 
1795
        pch_gbe_free_irq(adapter);
 
1796
 
 
1797
        del_timer_sync(&adapter->watchdog_timer);
 
1798
 
 
1799
        netdev->tx_queue_len = adapter->tx_queue_len;
 
1800
        netif_carrier_off(netdev);
 
1801
        netif_stop_queue(netdev);
 
1802
 
 
1803
        pch_gbe_reset(adapter);
 
1804
        pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
 
1805
        pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
 
1806
 
 
1807
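        /* Release the contiguous receive buffer pool that was set up by
         * pch_gbe_alloc_rx_buffers_pool() */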
        pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
 
1808
                            rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
 
1809
        rx_ring->rx_buff_pool_logic = 0;
 
1810
        rx_ring->rx_buff_pool_size = 0;
 
1811
        rx_ring->rx_buff_pool = NULL;
 
1812
}
 
1813
 
 
1814
/**
 
1815
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 
1816
 * @adapter:  Board private structure to initialize
 
1817
 * Returns
 
1818
 *      0:              Successfully
 
1819
 *      Negative value: Failed
 
1820
 */
 
1821
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
 
1822
{
 
1823
        struct pch_gbe_hw *hw = &adapter->hw;
 
1824
        struct net_device *netdev = adapter->netdev;
 
1825
 
 
1826
        adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 
1827
        hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
1828
        hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
1829
 
 
1830
        /* Initialize the hardware-specific values */
 
1831
        if (pch_gbe_hal_setup_init_funcs(hw)) {
 
1832
                pr_err("Hardware Initialization Failure\n");
 
1833
                return -EIO;
 
1834
        }
 
1835
        if (pch_gbe_alloc_queues(adapter)) {
 
1836
                pr_err("Unable to allocate memory for queues\n");
 
1837
                return -ENOMEM;
 
1838
        }
 
1839
        spin_lock_init(&adapter->hw.miim_lock);
 
1840
        spin_lock_init(&adapter->tx_queue_lock);
 
1841
        spin_lock_init(&adapter->stats_lock);
 
1842
        spin_lock_init(&adapter->ethtool_lock);
 
1843
        atomic_set(&adapter->irq_sem, 0);
 
1844
        pch_gbe_irq_disable(adapter);
 
1845
 
 
1846
        pch_gbe_init_stats(adapter);
 
1847
 
 
1848
        pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
 
1849
                 (u32) adapter->rx_buffer_len,
 
1850
                 hw->mac.min_frame_size, hw->mac.max_frame_size);
 
1851
        return 0;
 
1852
}
 
1853
 
 
1854
/**
 
1855
 * pch_gbe_open - Called when a network interface is made active
 
1856
 * @netdev:     Network interface device structure
 
1857
 * Returns
 
1858
 *      0:              Successfully
 
1859
 *      Negative value: Failed
 
1860
 */
 
1861
static int pch_gbe_open(struct net_device *netdev)
 
1862
{
 
1863
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
1864
        struct pch_gbe_hw *hw = &adapter->hw;
 
1865
        int err;
 
1866
 
 
1867
        /* allocate transmit descriptors */
 
1868
        err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
 
1869
        if (err)
 
1870
                goto err_setup_tx;
 
1871
        /* allocate receive descriptors */
 
1872
        err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
 
1873
        if (err)
 
1874
                goto err_setup_rx;
 
1875
        pch_gbe_hal_power_up_phy(hw);
 
1876
        err = pch_gbe_up(adapter);
 
1877
        if (err)
 
1878
                goto err_up;
 
1879
        pr_debug("Success End\n");
 
1880
        return 0;
 
1881
 
 
1882
err_up:
 
1883
        if (!adapter->wake_up_evt)
 
1884
                pch_gbe_hal_power_down_phy(hw);
 
1885
        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 
1886
err_setup_rx:
 
1887
        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 
1888
err_setup_tx:
 
1889
        pch_gbe_reset(adapter);
 
1890
        pr_err("Error End\n");
 
1891
        return err;
 
1892
}
 
1893
 
 
1894
/**
 
1895
 * pch_gbe_stop - Disables a network interface
 
1896
 * @netdev:  Network interface device structure
 
1897
 * Returns
 
1898
 *      0: Successfully
 
1899
 */
 
1900
static int pch_gbe_stop(struct net_device *netdev)
 
1901
{
 
1902
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
1903
        struct pch_gbe_hw *hw = &adapter->hw;
 
1904
 
 
1905
        pch_gbe_down(adapter);
 
1906
        if (!adapter->wake_up_evt)
 
1907
                pch_gbe_hal_power_down_phy(hw);
 
1908
        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 
1909
        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 
1910
        return 0;
 
1911
}
 
1912
 
 
1913
/**
 
1914
 * pch_gbe_xmit_frame - Start transmitting a packet
 
1915
 * @skb:     Socket buffer structure
 
1916
 * @netdev:  Network interface device structure
 
1917
 * Returns
 
1918
 *      - NETDEV_TX_OK:   The frame was queued for transmission
 
1919
 *      - NETDEV_TX_BUSY: The ring is full; the frame should be requeued
 
1920
 */
 
1921
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
1922
{
 
1923
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
1924
        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 
1925
        unsigned long flags;
 
1926
 
 
1927
        if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
 
1928
                pr_err("Transfer length Error: skb len: %d > max: %d\n",
 
1929
                       skb->len, adapter->hw.mac.max_frame_size);
 
1930
                dev_kfree_skb_any(skb);
 
1931
                adapter->stats.tx_length_errors++;
 
1932
                return NETDEV_TX_OK;
 
1933
        }
 
1934
        if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
 
1935
                /* Collision - tell upper layer to requeue */
 
1936
                return NETDEV_TX_LOCKED;
 
1937
        }
 
1938
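        /* PCH_GBE_DESC_UNUSED() reports how many descriptors are still free;
         * zero means the ring is full, so stop the queue until
         * pch_gbe_clean_tx() reclaims entries and wakes it again. */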
        if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 
1939
                netif_stop_queue(netdev);
 
1940
                spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
1941
                pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
 
1942
                         tx_ring->next_to_use, tx_ring->next_to_clean);
 
1943
                return NETDEV_TX_BUSY;
 
1944
        }
 
1945
        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
1946
 
 
1947
        /* CRC and ITAG insertion are not supported */
 
1948
        pch_gbe_tx_queue(adapter, tx_ring, skb);
 
1949
        return NETDEV_TX_OK;
 
1950
}
 
1951
 
 
1952
/**
 
1953
 * pch_gbe_get_stats - Get System Network Statistics
 
1954
 * @netdev:  Network interface device structure
 
1955
 * Returns:  The current stats
 
1956
 */
 
1957
static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
 
1958
{
 
1959
        /* only return the current stats */
 
1960
        return &netdev->stats;
 
1961
}
 
1962
 
 
1963
/**
 
1964
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 
1965
 * @netdev:   Network interface device structure
 
1966
 */
 
1967
static void pch_gbe_set_multi(struct net_device *netdev)
 
1968
{
 
1969
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
1970
        struct pch_gbe_hw *hw = &adapter->hw;
 
1971
        struct netdev_hw_addr *ha;
 
1972
        u8 *mta_list;
 
1973
        u32 rctl;
 
1974
        int i;
 
1975
        int mc_count;
 
1976
 
 
1977
        pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
 
1978
 
 
1979
        /* Check for Promiscuous and All Multicast modes */
 
1980
        rctl = ioread32(&hw->reg->RX_MODE);
 
1981
        mc_count = netdev_mc_count(netdev);
 
1982
        if ((netdev->flags & IFF_PROMISC)) {
 
1983
                rctl &= ~PCH_GBE_ADD_FIL_EN;
 
1984
                rctl &= ~PCH_GBE_MLT_FIL_EN;
 
1985
        } else if ((netdev->flags & IFF_ALLMULTI)) {
 
1986
                /* all the multicasting receive permissions */
 
1987
                rctl |= PCH_GBE_ADD_FIL_EN;
 
1988
                rctl &= ~PCH_GBE_MLT_FIL_EN;
 
1989
        } else {
 
1990
                if (mc_count >= PCH_GBE_MAR_ENTRIES) {
 
1991
                        /* all the multicasting receive permissions */
 
1992
                        rctl |= PCH_GBE_ADD_FIL_EN;
 
1993
                        rctl &= ~PCH_GBE_MLT_FIL_EN;
 
1994
                } else {
 
1995
                        rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
 
1996
                }
 
1997
        }
 
1998
        iowrite32(rctl, &hw->reg->RX_MODE);
 
1999
 
 
2000
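        /* With more multicast addresses than MAR entries the multicast filter
         * was disabled above (all multicast is accepted), so there is no
         * address list to program into the hardware. */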
        if (mc_count >= PCH_GBE_MAR_ENTRIES)
 
2001
                return;
 
2002
        mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
 
2003
        if (!mta_list)
 
2004
                return;
 
2005
 
 
2006
        /* The shared function expects a packed array of only addresses. */
 
2007
        i = 0;
 
2008
        netdev_for_each_mc_addr(ha, netdev) {
 
2009
                if (i == mc_count)
 
2010
                        break;
 
2011
                memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
 
2012
        }
 
2013
        pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
 
2014
                                        PCH_GBE_MAR_ENTRIES);
 
2015
        kfree(mta_list);
 
2016
 
 
2017
        pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
 
2018
                 ioread32(&hw->reg->RX_MODE), mc_count);
 
2019
}
 
2020
 
 
2021
/**
 
2022
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 
2023
 * @netdev: Network interface device structure
 
2024
 * @addr:   Pointer to an address structure
 
2025
 * Returns
 
2026
 *      0:              Successfully
 
2027
 *      -EADDRNOTAVAIL: Failed
 
2028
 */
 
2029
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
 
2030
{
 
2031
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2032
        struct sockaddr *skaddr = addr;
 
2033
        int ret_val;
 
2034
 
 
2035
        if (!is_valid_ether_addr(skaddr->sa_data)) {
 
2036
                ret_val = -EADDRNOTAVAIL;
 
2037
        } else {
 
2038
                memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
 
2039
                memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
 
2040
                pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 
2041
                ret_val = 0;
 
2042
        }
 
2043
        pr_debug("ret_val : 0x%08x\n", ret_val);
 
2044
        pr_debug("dev_addr : %pM\n", netdev->dev_addr);
 
2045
        pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
 
2046
        pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
 
2047
                 ioread32(&adapter->hw.reg->mac_adr[0].high),
 
2048
                 ioread32(&adapter->hw.reg->mac_adr[0].low));
 
2049
        return ret_val;
 
2050
}
 
2051
 
 
2052
/**
 
2053
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 
2054
 * @netdev:   Network interface device structure
 
2055
 * @new_mtu:  New value for maximum frame size
 
2056
 * Returns
 
2057
 *      0:              Successfully
 
2058
 *      -EINVAL:        Failed
 
2059
 */
 
2060
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 
2061
{
 
2062
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2063
        int max_frame;
 
2064
        unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
 
2065
        int err;
 
2066
 
 
2067
        max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
2068
        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 
2069
                (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
 
2070
                pr_err("Invalid MTU setting\n");
 
2071
                return -EINVAL;
 
2072
        }
 
2073
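        /* Round the receive buffer length up to the smallest supported size
         * (2KB, 4KB, 8KB or the maximum) that can hold the new frame. */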
        if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
 
2074
                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 
2075
        else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
 
2076
                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
 
2077
        else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
 
2078
                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
 
2079
        else
 
2080
                adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 
2081
 
 
2082
        if (netif_running(netdev)) {
 
2083
                pch_gbe_down(adapter);
 
2084
                err = pch_gbe_up(adapter);
 
2085
                if (err) {
 
2086
                        adapter->rx_buffer_len = old_rx_buffer_len;
 
2087
                        pch_gbe_up(adapter);
 
2088
                        return -ENOMEM;
 
2089
                } else {
 
2090
                        netdev->mtu = new_mtu;
 
2091
                        adapter->hw.mac.max_frame_size = max_frame;
 
2092
                }
 
2093
        } else {
 
2094
                pch_gbe_reset(adapter);
 
2095
                netdev->mtu = new_mtu;
 
2096
                adapter->hw.mac.max_frame_size = max_frame;
 
2097
        }
 
2098
 
 
2099
        pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
 
2100
                 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
 
2101
                 adapter->hw.mac.max_frame_size);
 
2102
        return 0;
 
2103
}
 
2104
 
 
2105
/**
 
2106
 * pch_gbe_set_features - Reset device after features changed
 
2107
 * @netdev:   Network interface device structure
 
2108
 * @features:  New features
 
2109
 * Returns
 
2110
 *      0:              HW state updated successfully
 
2111
 */
 
2112
static int pch_gbe_set_features(struct net_device *netdev, u32 features)
 
2113
{
 
2114
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2115
        u32 changed = features ^ netdev->features;
 
2116
 
 
2117
        if (!(changed & NETIF_F_RXCSUM))
 
2118
                return 0;
 
2119
 
 
2120
        if (netif_running(netdev))
 
2121
                pch_gbe_reinit_locked(adapter);
 
2122
        else
 
2123
                pch_gbe_reset(adapter);
 
2124
 
 
2125
        return 0;
 
2126
}
 
2127
 
 
2128
/**
 
2129
 * pch_gbe_ioctl - Control registers through the MII interface
 
2130
 * @netdev:   Network interface device structure
 
2131
 * @ifr:      Pointer to ifr structure
 
2132
 * @cmd:      Control command
 
2133
 * Returns
 
2134
 *      0:      Successfully
 
2135
 *      Negative value: Failed
 
2136
 */
 
2137
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 
2138
{
 
2139
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2140
 
 
2141
        pr_debug("cmd : 0x%04x\n", cmd);
 
2142
 
 
2143
        return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
 
2144
}
 
2145
 
 
2146
/**
 
2147
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 
2148
 * @netdev:   Network interface device structure
 
2149
 */
 
2150
static void pch_gbe_tx_timeout(struct net_device *netdev)
 
2151
{
 
2152
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2153
 
 
2154
        /* Do the reset outside of interrupt context */
 
2155
        adapter->stats.tx_timeout_count++;
 
2156
        schedule_work(&adapter->reset_task);
 
2157
}
 
2158
 
 
2159
/**
 
2160
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 
2161
 * @napi:    Pointer of polling device struct
 
2162
 * @budget:  The maximum number of packets to process
 
2163
 * Returns
 
2164
 *      The number of packets processed (work_done).
 
2165
 *      Returning less than the budget exits polling mode.
 
2166
 */
 
2167
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 
2168
{
 
2169
        struct pch_gbe_adapter *adapter =
 
2170
            container_of(napi, struct pch_gbe_adapter, napi);
 
2171
        int work_done = 0;
 
2172
        bool poll_end_flag = false;
 
2173
        bool cleaned = false;
 
2174
        u32 int_en;
 
2175
 
 
2176
        pr_debug("budget : %d\n", budget);
 
2177
 
 
2178
        pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
 
2179
        cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 
2180
 
 
2181
        if (!cleaned)
 
2182
                work_done = budget;
 
2183
        /* If no Tx and not enough Rx work done,
 
2184
         * exit the polling mode
 
2185
         */
 
2186
        if (work_done < budget)
 
2187
                poll_end_flag = true;
 
2188
 
 
2189
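        /*
         * If the budget was not exhausted, leave polling mode: complete NAPI,
         * restart the receiver if it was stopped on a FIFO error and
         * re-enable interrupts. Otherwise stay in polling mode, but still
         * restart a stopped receiver and unmask the RX FIFO error interrupt.
         */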
        if (poll_end_flag) {
 
2190
                napi_complete(napi);
 
2191
                if (adapter->rx_stop_flag) {
 
2192
                        adapter->rx_stop_flag = false;
 
2193
                        pch_gbe_start_receive(&adapter->hw);
 
2194
                }
 
2195
                pch_gbe_irq_enable(adapter);
 
2196
        } else
 
2197
                if (adapter->rx_stop_flag) {
 
2198
                        adapter->rx_stop_flag = false;
 
2199
                        pch_gbe_start_receive(&adapter->hw);
 
2200
                        int_en = ioread32(&adapter->hw.reg->INT_EN);
 
2201
                        iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
 
2202
                                &adapter->hw.reg->INT_EN);
 
2203
                }
 
2204
 
 
2205
        pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
 
2206
                 poll_end_flag, work_done, budget);
 
2207
 
 
2208
        return work_done;
 
2209
}
 
2210
 
 
2211
#ifdef CONFIG_NET_POLL_CONTROLLER
 
2212
/**
 
2213
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 
2214
 * @netdev:  Network interface device structure
 
2215
 */
 
2216
static void pch_gbe_netpoll(struct net_device *netdev)
 
2217
{
 
2218
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2219
 
 
2220
        disable_irq(adapter->pdev->irq);
 
2221
        pch_gbe_intr(adapter->pdev->irq, netdev);
 
2222
        enable_irq(adapter->pdev->irq);
 
2223
}
 
2224
#endif
 
2225
 
 
2226
static const struct net_device_ops pch_gbe_netdev_ops = {
 
2227
        .ndo_open = pch_gbe_open,
 
2228
        .ndo_stop = pch_gbe_stop,
 
2229
        .ndo_start_xmit = pch_gbe_xmit_frame,
 
2230
        .ndo_get_stats = pch_gbe_get_stats,
 
2231
        .ndo_set_mac_address = pch_gbe_set_mac,
 
2232
        .ndo_tx_timeout = pch_gbe_tx_timeout,
 
2233
        .ndo_change_mtu = pch_gbe_change_mtu,
 
2234
        .ndo_set_features = pch_gbe_set_features,
 
2235
        .ndo_do_ioctl = pch_gbe_ioctl,
 
2236
        .ndo_set_rx_mode = pch_gbe_set_multi,
 
2237
#ifdef CONFIG_NET_POLL_CONTROLLER
 
2238
        .ndo_poll_controller = pch_gbe_netpoll,
 
2239
#endif
 
2240
};
 
2241
 
 
2242
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
 
2243
                                                pci_channel_state_t state)
 
2244
{
 
2245
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2246
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2247
 
 
2248
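        /*
         * PCI error recovery: detach the interface, quiesce the hardware and
         * disable the device, then ask the PCI core for a slot reset; normal
         * operation resumes via pch_gbe_io_slot_reset() and
         * pch_gbe_io_resume().
         */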
        netif_device_detach(netdev);
 
2249
        if (netif_running(netdev))
 
2250
                pch_gbe_down(adapter);
 
2251
        pci_disable_device(pdev);
 
2252
        /* Request a slot reset. */
 
2253
        return PCI_ERS_RESULT_NEED_RESET;
 
2254
}
 
2255
 
 
2256
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
 
2257
{
 
2258
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2259
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2260
        struct pch_gbe_hw *hw = &adapter->hw;
 
2261
 
 
2262
        if (pci_enable_device(pdev)) {
 
2263
                pr_err("Cannot re-enable PCI device after reset\n");
 
2264
                return PCI_ERS_RESULT_DISCONNECT;
 
2265
        }
 
2266
        pci_set_master(pdev);
 
2267
        pci_enable_wake(pdev, PCI_D0, 0);
 
2268
        pch_gbe_hal_power_up_phy(hw);
 
2269
        pch_gbe_reset(adapter);
 
2270
        /* Clear wake up status */
 
2271
        pch_gbe_mac_set_wol_event(hw, 0);
 
2272
 
 
2273
        return PCI_ERS_RESULT_RECOVERED;
 
2274
}
 
2275
 
 
2276
static void pch_gbe_io_resume(struct pci_dev *pdev)
 
2277
{
 
2278
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2279
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2280
 
 
2281
        if (netif_running(netdev)) {
 
2282
                if (pch_gbe_up(adapter)) {
 
2283
                        pr_debug("can't bring device back up after reset\n");
 
2284
                        return;
 
2285
                }
 
2286
        }
 
2287
        netif_device_attach(netdev);
 
2288
}
 
2289
 
 
2290
static int __pch_gbe_suspend(struct pci_dev *pdev)
 
2291
{
 
2292
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2293
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2294
        struct pch_gbe_hw *hw = &adapter->hw;
 
2295
        u32 wufc = adapter->wake_up_evt;
 
2296
        int retval = 0;
 
2297
 
 
2298
        netif_device_detach(netdev);
 
2299
        if (netif_running(netdev))
 
2300
                pch_gbe_down(adapter);
 
2301
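        /*
         * If any wake-on-LAN events are configured, keep the receiver and
         * link programmed so the MAC can recognize the wake pattern;
         * otherwise power the PHY down before disabling the device.
         */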
        if (wufc) {
 
2302
                pch_gbe_set_multi(netdev);
 
2303
                pch_gbe_setup_rctl(adapter);
 
2304
                pch_gbe_configure_rx(adapter);
 
2305
                pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
 
2306
                                        hw->mac.link_duplex);
 
2307
                pch_gbe_set_mode(adapter, hw->mac.link_speed,
 
2308
                                        hw->mac.link_duplex);
 
2309
                pch_gbe_mac_set_wol_event(hw, wufc);
 
2310
                pci_disable_device(pdev);
 
2311
        } else {
 
2312
                pch_gbe_hal_power_down_phy(hw);
 
2313
                pch_gbe_mac_set_wol_event(hw, wufc);
 
2314
                pci_disable_device(pdev);
 
2315
        }
 
2316
        return retval;
 
2317
}
 
2318
 
 
2319
#ifdef CONFIG_PM
 
2320
static int pch_gbe_suspend(struct device *device)
 
2321
{
 
2322
        struct pci_dev *pdev = to_pci_dev(device);
 
2323
 
 
2324
        return __pch_gbe_suspend(pdev);
 
2325
}
 
2326
 
 
2327
static int pch_gbe_resume(struct device *device)
 
2328
{
 
2329
        struct pci_dev *pdev = to_pci_dev(device);
 
2330
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2331
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2332
        struct pch_gbe_hw *hw = &adapter->hw;
 
2333
        u32 err;
 
2334
 
 
2335
        err = pci_enable_device(pdev);
 
2336
        if (err) {
 
2337
                pr_err("Cannot enable PCI device from suspend\n");
 
2338
                return err;
 
2339
        }
 
2340
        pci_set_master(pdev);
 
2341
        pch_gbe_hal_power_up_phy(hw);
 
2342
        pch_gbe_reset(adapter);
 
2343
        /* Clear wake on lan control and status */
 
2344
        pch_gbe_mac_set_wol_event(hw, 0);
 
2345
 
 
2346
        if (netif_running(netdev))
 
2347
                pch_gbe_up(adapter);
 
2348
        netif_device_attach(netdev);
 
2349
 
 
2350
        return 0;
 
2351
}
 
2352
#endif /* CONFIG_PM */
 
2353
 
 
2354
static void pch_gbe_shutdown(struct pci_dev *pdev)
 
2355
{
 
2356
        __pch_gbe_suspend(pdev);
 
2357
        if (system_state == SYSTEM_POWER_OFF) {
 
2358
                pci_wake_from_d3(pdev, true);
 
2359
                pci_set_power_state(pdev, PCI_D3hot);
 
2360
        }
 
2361
}
 
2362
 
 
2363
static void pch_gbe_remove(struct pci_dev *pdev)
 
2364
{
 
2365
        struct net_device *netdev = pci_get_drvdata(pdev);
 
2366
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
2367
 
 
2368
        cancel_work_sync(&adapter->reset_task);
 
2369
        unregister_netdev(netdev);
 
2370
 
 
2371
        pch_gbe_hal_phy_hw_reset(&adapter->hw);
 
2372
 
 
2373
        kfree(adapter->tx_ring);
 
2374
        kfree(adapter->rx_ring);
 
2375
 
 
2376
        iounmap(adapter->hw.reg);
 
2377
        pci_release_regions(pdev);
 
2378
        free_netdev(netdev);
 
2379
        pci_disable_device(pdev);
 
2380
}
 
2381
 
 
2382
static int pch_gbe_probe(struct pci_dev *pdev,
 
2383
                          const struct pci_device_id *pci_id)
 
2384
{
 
2385
        struct net_device *netdev;
 
2386
        struct pch_gbe_adapter *adapter;
 
2387
        int ret;
 
2388
 
 
2389
        ret = pci_enable_device(pdev);
 
2390
        if (ret)
 
2391
                return ret;
 
2392
 
 
2393
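        /* Prefer 64-bit DMA; if either the streaming or the coherent 64-bit
         * mask is rejected, fall back to 32-bit addressing or abort. */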
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
 
2394
                || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 
2395
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
2396
                if (ret) {
 
2397
                        ret = pci_set_consistent_dma_mask(pdev,
 
2398
                                                          DMA_BIT_MASK(32));
 
2399
                        if (ret) {
 
2400
                                dev_err(&pdev->dev, "ERR: No usable DMA "
 
2401
                                        "configuration, aborting\n");
 
2402
                                goto err_disable_device;
 
2403
                        }
 
2404
                }
 
2405
        }
 
2406
 
 
2407
        ret = pci_request_regions(pdev, KBUILD_MODNAME);
 
2408
        if (ret) {
 
2409
                dev_err(&pdev->dev,
 
2410
                        "ERR: Can't reserve PCI I/O and memory resources\n");
 
2411
                goto err_disable_device;
 
2412
        }
 
2413
        pci_set_master(pdev);
 
2414
 
 
2415
        netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
 
2416
        if (!netdev) {
 
2417
                ret = -ENOMEM;
 
2418
                dev_err(&pdev->dev,
 
2419
                        "ERR: Can't allocate and set up an Ethernet device\n");
 
2420
                goto err_release_pci;
 
2421
        }
 
2422
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
2423
 
 
2424
        pci_set_drvdata(pdev, netdev);
 
2425
        adapter = netdev_priv(netdev);
 
2426
        adapter->netdev = netdev;
 
2427
        adapter->pdev = pdev;
 
2428
        adapter->hw.back = adapter;
 
2429
        adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
 
2430
        if (!adapter->hw.reg) {
 
2431
                ret = -EIO;
 
2432
                dev_err(&pdev->dev, "Can't ioremap\n");
 
2433
                goto err_free_netdev;
 
2434
        }
 
2435
 
 
2436
        netdev->netdev_ops = &pch_gbe_netdev_ops;
 
2437
        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
 
2438
        netif_napi_add(netdev, &adapter->napi,
 
2439
                       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
 
2440
        netdev->hw_features = NETIF_F_RXCSUM |
 
2441
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
2442
        netdev->features = netdev->hw_features;
 
2443
        pch_gbe_set_ethtool_ops(netdev);
 
2444
 
 
2445
        pch_gbe_mac_load_mac_addr(&adapter->hw);
 
2446
        pch_gbe_mac_reset_hw(&adapter->hw);
 
2447
 
 
2448
        /* setup the private structure */
 
2449
        ret = pch_gbe_sw_init(adapter);
 
2450
        if (ret)
 
2451
                goto err_iounmap;
 
2452
 
 
2453
        /* Initialize PHY */
 
2454
        ret = pch_gbe_init_phy(adapter);
 
2455
        if (ret) {
 
2456
                dev_err(&pdev->dev, "PHY initialize error\n");
 
2457
                goto err_free_adapter;
 
2458
        }
 
2459
        pch_gbe_hal_get_bus_info(&adapter->hw);
 
2460
 
 
2461
        /* Read the MAC address and store it in the private data */
 
2462
        ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
 
2463
        if (ret) {
 
2464
                dev_err(&pdev->dev, "MAC address Read Error\n");
 
2465
                goto err_free_adapter;
 
2466
        }
 
2467
 
 
2468
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
 
2469
        if (!is_valid_ether_addr(netdev->dev_addr)) {
 
2470
                dev_err(&pdev->dev, "Invalid MAC Address\n");
 
2471
                ret = -EIO;
 
2472
                goto err_free_adapter;
 
2473
        }
 
2474
        setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
 
2475
                    (unsigned long)adapter);
 
2476
 
 
2477
        INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
 
2478
 
 
2479
        pch_gbe_check_options(adapter);
 
2480
 
 
2481
        /* initialize the wol settings based on the eeprom settings */
 
2482
        adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
 
2483
        dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
 
2484
 
 
2485
        /* reset the hardware with the new settings */
 
2486
        pch_gbe_reset(adapter);
 
2487
 
 
2488
        ret = register_netdev(netdev);
 
2489
        if (ret)
 
2490
                goto err_free_adapter;
 
2491
        /* tell the stack to leave us alone until pch_gbe_open() is called */
 
2492
        netif_carrier_off(netdev);
 
2493
        netif_stop_queue(netdev);
 
2494
 
 
2495
        dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
 
2496
 
 
2497
        device_set_wakeup_enable(&pdev->dev, 1);
 
2498
        return 0;
 
2499
 
 
2500
err_free_adapter:
 
2501
        pch_gbe_hal_phy_hw_reset(&adapter->hw);
 
2502
        kfree(adapter->tx_ring);
 
2503
        kfree(adapter->rx_ring);
 
2504
err_iounmap:
 
2505
        iounmap(adapter->hw.reg);
 
2506
err_free_netdev:
 
2507
        free_netdev(netdev);
 
2508
err_release_pci:
 
2509
        pci_release_regions(pdev);
 
2510
err_disable_device:
 
2511
        pci_disable_device(pdev);
 
2512
        return ret;
 
2513
}
 
2514
 
 
2515
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
 
2516
        {.vendor = PCI_VENDOR_ID_INTEL,
 
2517
         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
 
2518
         .subvendor = PCI_ANY_ID,
 
2519
         .subdevice = PCI_ANY_ID,
 
2520
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 
2521
         .class_mask = (0xFFFF00)
 
2522
         },
 
2523
        {.vendor = PCI_VENDOR_ID_ROHM,
 
2524
         .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
 
2525
         .subvendor = PCI_ANY_ID,
 
2526
         .subdevice = PCI_ANY_ID,
 
2527
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 
2528
         .class_mask = (0xFFFF00)
 
2529
         },
 
2530
        {.vendor = PCI_VENDOR_ID_ROHM,
 
2531
         .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
 
2532
         .subvendor = PCI_ANY_ID,
 
2533
         .subdevice = PCI_ANY_ID,
 
2534
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 
2535
         .class_mask = (0xFFFF00)
 
2536
         },
 
2537
        /* required last entry */
 
2538
        {0}
 
2539
};
 
2540
 
 
2541
#ifdef CONFIG_PM
 
2542
static const struct dev_pm_ops pch_gbe_pm_ops = {
 
2543
        .suspend = pch_gbe_suspend,
 
2544
        .resume = pch_gbe_resume,
 
2545
        .freeze = pch_gbe_suspend,
 
2546
        .thaw = pch_gbe_resume,
 
2547
        .poweroff = pch_gbe_suspend,
 
2548
        .restore = pch_gbe_resume,
 
2549
};
 
2550
#endif
 
2551
 
 
2552
static struct pci_error_handlers pch_gbe_err_handler = {
 
2553
        .error_detected = pch_gbe_io_error_detected,
 
2554
        .slot_reset = pch_gbe_io_slot_reset,
 
2555
        .resume = pch_gbe_io_resume
 
2556
};
 
2557
 
 
2558
static struct pci_driver pch_gbe_driver = {
 
2559
        .name = KBUILD_MODNAME,
 
2560
        .id_table = pch_gbe_pcidev_id,
 
2561
        .probe = pch_gbe_probe,
 
2562
        .remove = pch_gbe_remove,
 
2563
#ifdef CONFIG_PM
 
2564
        .driver.pm = &pch_gbe_pm_ops,
 
2565
#endif
 
2566
        .shutdown = pch_gbe_shutdown,
 
2567
        .err_handler = &pch_gbe_err_handler
 
2568
};
 
2569
 
 
2570
 
 
2571
static int __init pch_gbe_init_module(void)
 
2572
{
 
2573
        int ret;
 
2574
 
 
2575
        ret = pci_register_driver(&pch_gbe_driver);
 
2576
        if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
 
2577
                if (copybreak == 0) {
 
2578
                        pr_info("copybreak disabled\n");
 
2579
                } else {
 
2580
                        pr_info("copybreak enabled for packets <= %u bytes\n",
 
2581
                                copybreak);
 
2582
                }
 
2583
        }
 
2584
        return ret;
 
2585
}
 
2586
 
 
2587
static void __exit pch_gbe_exit_module(void)
 
2588
{
 
2589
        pci_unregister_driver(&pch_gbe_driver);
 
2590
}
 
2591
 
 
2592
module_init(pch_gbe_init_module);
 
2593
module_exit(pch_gbe_exit_module);
 
2594
 
 
2595
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
 
2596
MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
 
2597
MODULE_LICENSE("GPL");
 
2598
MODULE_VERSION(DRV_VERSION);
 
2599
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
 
2600
 
 
2601
module_param(copybreak, uint, 0644);
 
2602
MODULE_PARM_DESC(copybreak,
 
2603
        "Maximum size of packet that is copied to a new buffer on receive");
 
2604
 
 
2605
/* pch_gbe_main.c */