2
* Tehuti Networks(R) Network Driver
3
* ethtool interface implementation
4
* Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
6
* This program is free software; you can redistribute it and/or modify
7
* it under the terms of the GNU General Public License as published by
8
* the Free Software Foundation; either version 2 of the License, or
9
* (at your option) any later version.
13
* RX HW/SW interaction overview
14
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15
* There are 2 types of RX communication channels between driver and NIC.
16
* 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
17
* traffic. This Fifo is filled by SW and read by HW. Each descriptor holds
18
* info about buffer's location, size and ID. An ID field is used to identify a
19
* buffer when it's returned with data via RXD Fifo (see below)
20
* 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
21
* filled by HW and read by SW. Each descriptor holds status and ID.
22
* HW pops descriptor from RXF Fifo, stores ID, fills buffer with incoming data,
23
* via DMA moves it into host memory, builds a new RXD descriptor with the same ID,
24
* pushes it into RXD Fifo and raises interrupt to indicate new RX data.
26
* Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos.
27
* One holds 1.5K packets and the other holds 26K packets. Depending on incoming
28
* packet size, HW decides which RXF Fifo to pop a buffer from. When the packet is
29
* filled with data, HW builds a new RXD descriptor for it and pushes it into the single RXD Fifo.
32
* RX SW Data Structures
33
* ~~~~~~~~~~~~~~~~~~~~~
34
* skb db - used to keep track of all skbs owned by SW and their dma addresses.
35
* For RX case, ownership lasts from allocating new empty skb for RXF until
36
* accepting full skb from RXD and passing it to OS. Each RXF Fifo has its own
37
* skb db. Implemented as array with bitmask.
38
* fifo - keeps info about fifo's size and location, relevant HW registers,
39
* usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
40
* Implemented as simple struct.
42
* RX SW Execution Flow
43
* ~~~~~~~~~~~~~~~~~~~~
44
* Upon initialization (ifconfig up) driver creates RX fifos and initializes
45
* relevant registers. At the end of init phase, driver enables interrupts.
46
* NIC sees that there are no RXF buffers and raises
47
* RD_INTR interrupt, isr fills skbs and Rx begins.
48
* Driver has two receive operation modes:
49
* NAPI - interrupt-driven mixed with polling
50
* interrupt-driven only
52
* The interrupt-driven only flow is as follows. When a buffer is ready, HW raises
53
* interrupt and isr is called. isr collects all available packets
54
* (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
56
* Rx buffer allocation note
57
* ~~~~~~~~~~~~~~~~~~~~~~~~~
58
* The driver takes care to post only as many RxF descriptors as ensures that the
* corresponding RxD descriptors can never fill the entire RxD fifo. The main reason
* is the lack of an overflow check in Bordeaux for the RxD fifo free/used size.
61
* FIXME: this is NOT fully implemented, more work should be done
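*
* Illustrative sketch of the RXF/RXD round trip described above (not a
* literal code path; the names are those of the helpers defined below):
*
*   SW: idx = bdx_rxdb_alloc_elem(db);      - pick a free skb slot (the ID)
*       map the skb for DMA, build an rxf_desc with va_lo = idx and the
*       buffer's dma address, write it into the RXF fifo and advance WPTR
*   HW: pops the rxf_desc, DMAs the incoming packet into the buffer, writes
*       an rxd_desc carrying the same va_lo into the RXD fifo and raises an
*       interrupt
*   SW: dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);  - find the skb by its ID
*       hand dm->skb up the stack and return idx to the free-element stack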
65
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
70
{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
71
{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72
{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76
MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
78
/* Definitions needed by ISR or NAPI functions */
79
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
80
static void bdx_tx_cleanup(struct bdx_priv *priv);
81
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);
83
/* Definitions needed by FW loading */
84
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);
86
/* Definitions needed by hw_start */
87
static int bdx_tx_init(struct bdx_priv *priv);
88
static int bdx_rx_init(struct bdx_priv *priv);
90
/* Definitions needed by bdx_close */
91
static void bdx_rx_free(struct bdx_priv *priv);
92
static void bdx_tx_free(struct bdx_priv *priv);
94
/* Definitions needed by bdx_probe */
95
static void bdx_set_ethtool_ops(struct net_device *netdev);
97
/*************************************************************************
99
*************************************************************************/
101
static void print_hw_id(struct pci_dev *pdev)
103
struct pci_nic *nic = pci_get_drvdata(pdev);
104
u16 pci_link_status = 0;
107
pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
108
pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
110
pr_info("%s%s\n", BDX_NIC_NAME,
111
nic->port_num == 1 ? "" : ", 2-Port");
112
pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
113
readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
114
readl(nic->regs + FPGA_SEED),
115
GET_LINK_STATUS_LANES(pci_link_status),
116
GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
119
static void print_fw_id(struct pci_nic *nic)
121
pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
124
static void print_eth_id(struct net_device *ndev)
126
netdev_info(ndev, "%s, Port %c\n",
127
BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
131
/*************************************************************************
133
*************************************************************************/
135
#define bdx_enable_interrupts(priv) \
136
do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
137
#define bdx_disable_interrupts(priv) \
138
do { WRITE_REG(priv, regIMR, 0); } while (0)
141
* create TX/RX descriptor fifo for host-NIC communication.
142
* 1K extra space is allocated at the end of the fifo to simplify
143
* processing of descriptors that wrap around the fifo's end
144
* @priv - NIC private structure
145
* @f - fifo to initialize
146
* @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
147
* @reg_XXX - offsets of registers relative to base address
149
* Returns 0 on success, negative value on failure
153
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
154
u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
156
u16 memsz = FIFO_SIZE * (1 << fsz_type);
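/* e.g. fsz_type == 2 gives FIFO_SIZE * 4, i.e. the 16KB case of the
 * 0-4KB/1-8KB/2-16KB/3-32KB encoding documented above */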
158
memset(f, 0, sizeof(struct fifo));
159
/* pci_alloc_consistent gives us 4k-aligned memory */
160
f->va = pci_alloc_consistent(priv->pdev,
161
memsz + FIFO_EXTRA_SPACE, &f->da);
163
pr_err("pci_alloc_consistent failed\n");
166
f->reg_CFG0 = reg_CFG0;
167
f->reg_CFG1 = reg_CFG1;
168
f->reg_RPTR = reg_RPTR;
169
f->reg_WPTR = reg_WPTR;
173
f->size_mask = memsz - 1;
174
WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
175
WRITE_REG(priv, reg_CFG1, H32_64(f->da));
180
/* bdx_fifo_free - free all resources used by fifo
181
* @priv - NIC private structure
182
* @f - fifo to release
184
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
188
pci_free_consistent(priv->pdev,
189
f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
196
* bdx_link_changed - notifies OS about hw link state.
197
* @bdx_priv - hw adapter structure
199
static void bdx_link_changed(struct bdx_priv *priv)
201
u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
204
if (netif_carrier_ok(priv->ndev)) {
205
netif_stop_queue(priv->ndev);
206
netif_carrier_off(priv->ndev);
207
netdev_err(priv->ndev, "Link Down\n");
210
if (!netif_carrier_ok(priv->ndev)) {
211
netif_wake_queue(priv->ndev);
212
netif_carrier_on(priv->ndev);
213
netdev_err(priv->ndev, "Link Up\n");
218
static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
220
if (isr & IR_RX_FREE_0) {
221
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
225
if (isr & IR_LNKCHG0)
226
bdx_link_changed(priv);
228
if (isr & IR_PCIE_LINK)
229
netdev_err(priv->ndev, "PCI-E Link Fault\n");
231
if (isr & IR_PCIE_TOUT)
232
netdev_err(priv->ndev, "PCI-E Time Out\n");
236
/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
237
* @irq - interrupt number
238
* @ndev - network device
239
* @regs - CPU registers
241
* Returns IRQ_NONE if it was not our interrupt, IRQ_HANDLED otherwise
243
* It reads the ISR register to learn interrupt reasons, and processes them one by one.
244
* Reasons of interest are:
245
* RX_DESC - new packet has arrived and RXD fifo holds its descriptor
246
* RX_FREE - number of free Rx buffers in RXF fifo gets low
247
* TX_FREE - packet was transmitted and TXF fifo holds its descriptor
250
static irqreturn_t bdx_isr_napi(int irq, void *dev)
252
struct net_device *ndev = dev;
253
struct bdx_priv *priv = netdev_priv(ndev);
257
isr = (READ_REG(priv, regISR) & IR_RUN);
258
if (unlikely(!isr)) {
259
bdx_enable_interrupts(priv);
260
return IRQ_NONE; /* Not our interrupt */
264
bdx_isr_extra(priv, isr);
266
if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
267
if (likely(napi_schedule_prep(&priv->napi))) {
268
__napi_schedule(&priv->napi);
271
/* NOTE: we get here if intr has slipped into window
272
* between these lines in bdx_poll:
273
* bdx_enable_interrupts(priv);
275
* currently intrs are disabled (since we read ISR),
276
* and we have failed to register next poll.
277
* so we read the regs to trigger chip
278
* and allow further interrupts. */
279
READ_REG(priv, regTXF_WPTR_0);
280
READ_REG(priv, regRXD_WPTR_0);
284
bdx_enable_interrupts(priv);
288
static int bdx_poll(struct napi_struct *napi, int budget)
290
struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
294
bdx_tx_cleanup(priv);
295
work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
296
if ((work_done < budget) ||
297
(priv->napi_stop++ >= 30)) {
298
DBG("rx poll is done. backing to isr-driven\n");
300
/* from time to time we exit to let NAPI layer release
301
* device lock and allow waiting tasks (eg rmmod) to advance */
305
bdx_enable_interrupts(priv);
310
/* bdx_fw_load - loads firmware to NIC
311
* @priv - NIC private structure
312
* Firmware is loaded via TXD fifo, so it must be initialized first.
313
* Firmware must be loaded once per NIC, not per PCI device provided by the NIC (a NIC
* can have several of them). So all drivers use a semaphore register to choose the one
* that will actually load FW to the NIC.
318
static int bdx_fw_load(struct bdx_priv *priv)
320
const struct firmware *fw = NULL;
325
master = READ_REG(priv, regINIT_SEMAPHORE);
326
if (!READ_REG(priv, regINIT_STATUS) && master) {
327
rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
330
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
333
for (i = 0; i < 200; i++) {
334
if (READ_REG(priv, regINIT_STATUS)) {
343
WRITE_REG(priv, regINIT_SEMAPHORE, 1);
345
release_firmware(fw);
348
netdev_err(priv->ndev, "firmware loading failed\n");
350
DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
351
READ_REG(priv, regVPC),
352
READ_REG(priv, regVIC),
353
READ_REG(priv, regINIT_STATUS), i);
356
DBG("%s: firmware loading success\n", priv->ndev->name);
361
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
366
DBG("mac0=%x mac1=%x mac2=%x\n",
367
READ_REG(priv, regUNC_MAC0_A),
368
READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
370
val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
371
WRITE_REG(priv, regUNC_MAC2_A, val);
372
val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
373
WRITE_REG(priv, regUNC_MAC1_A, val);
374
val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
375
WRITE_REG(priv, regUNC_MAC0_A, val);
377
DBG("mac0=%x mac1=%x mac2=%x\n",
378
READ_REG(priv, regUNC_MAC0_A),
379
READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
383
/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
384
* @priv - NIC private structure
386
static int bdx_hw_start(struct bdx_priv *priv)
389
struct net_device *ndev = priv->ndev;
392
bdx_link_changed(priv);
394
/* 10G overall max length (vlan, eth&ip header, ip payload, crc) */
395
WRITE_REG(priv, regFRM_LENGTH, 0x3FE0);
396
WRITE_REG(priv, regPAUSE_QUANT, 0x96);
397
WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
398
WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
399
WRITE_REG(priv, regRX_FULLNESS, 0);
400
WRITE_REG(priv, regTX_FULLNESS, 0);
401
WRITE_REG(priv, regCTRLST,
402
regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);
404
WRITE_REG(priv, regVGLB, 0);
405
WRITE_REG(priv, regMAX_FRAME_A,
406
priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);
408
DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */
409
WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
410
WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */
412
DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */
413
WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */
415
/* Enable timer interrupt once in 2 secs. */
416
/*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */
417
bdx_restore_mac(priv->ndev, priv);
419
WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
420
GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
422
#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
424
rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
428
bdx_enable_interrupts(priv);
436
static void bdx_hw_stop(struct bdx_priv *priv)
439
bdx_disable_interrupts(priv);
440
free_irq(priv->pdev->irq, priv->ndev);
442
netif_carrier_off(priv->ndev);
443
netif_stop_queue(priv->ndev);
448
static int bdx_hw_reset_direct(void __iomem *regs)
453
/* reset sequences: read, write 1, read, write 0 */
454
val = readl(regs + regCLKPLL);
455
writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
457
val = readl(regs + regCLKPLL);
458
writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);
460
/* check that the PLLs are locked and reset ended */
461
for (i = 0; i < 70; i++, mdelay(10))
462
if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
463
/* do any PCI-E read transaction */
464
readl(regs + regRXD_CFG0_0);
467
pr_err("HW reset failed\n");
468
return 1; /* failure */
471
static int bdx_hw_reset(struct bdx_priv *priv)
476
if (priv->port == 0) {
477
/* reset sequences: read, write 1, read, write 0 */
478
val = READ_REG(priv, regCLKPLL);
479
WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
481
val = READ_REG(priv, regCLKPLL);
482
WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
484
/* check that the PLLs are locked and reset ended */
485
for (i = 0; i < 70; i++, mdelay(10))
486
if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
487
/* do any PCI-E read transaction */
488
READ_REG(priv, regRXD_CFG0_0);
491
pr_err("HW reset failed\n");
492
return 1; /* failure */
495
static int bdx_sw_reset(struct bdx_priv *priv)
500
/* 1. load MAC (obsolete) */
501
/* 2. disable Rx (and Tx) */
502
WRITE_REG(priv, regGMAC_RXF_A, 0);
504
/* 3. disable port */
505
WRITE_REG(priv, regDIS_PORT, 1);
506
/* 4. disable queue */
507
WRITE_REG(priv, regDIS_QU, 1);
508
/* 5. wait until hw is disabled */
509
for (i = 0; i < 50; i++) {
510
if (READ_REG(priv, regRST_PORT) & 1)
515
netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
517
/* 6. disable intrs */
518
WRITE_REG(priv, regRDINTCM0, 0);
519
WRITE_REG(priv, regTDINTCM0, 0);
520
WRITE_REG(priv, regIMR, 0);
521
READ_REG(priv, regISR);
524
WRITE_REG(priv, regRST_QU, 1);
526
WRITE_REG(priv, regRST_PORT, 1);
527
/* 9. zero all read and write pointers */
528
for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
529
DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
530
for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
531
WRITE_REG(priv, i, 0);
532
/* 10. unset port disable */
533
WRITE_REG(priv, regDIS_PORT, 0);
534
/* 11. unset queue disable */
535
WRITE_REG(priv, regDIS_QU, 0);
536
/* 12. unset queue reset */
537
WRITE_REG(priv, regRST_QU, 0);
538
/* 13. unset port reset */
539
WRITE_REG(priv, regRST_PORT, 0);
541
/* skipped. will be done later */
542
/* 15. save MAC (obsolete) */
543
for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
544
DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
549
/* bdx_reset - performs right type of reset depending on hw type */
550
static int bdx_reset(struct bdx_priv *priv)
553
RET((priv->pdev->device == 0x3009)
555
: bdx_sw_reset(priv));
559
* bdx_close - Disables a network interface
560
* @netdev: network interface device structure
562
* Returns 0, this is not allowed to fail
564
* The close entry point is called when an interface is de-activated
565
* by the OS. The hardware is still under the driver's control, but
566
* needs to be disabled. A global MAC reset is issued to stop the
567
* hardware, and all transmit and receive resources are freed.
569
static int bdx_close(struct net_device *ndev)
571
struct bdx_priv *priv = NULL;
574
priv = netdev_priv(ndev);
576
napi_disable(&priv->napi);
586
* bdx_open - Called when a network interface is made active
587
* @netdev: network interface device structure
589
* Returns 0 on success, negative value on failure
591
* The open entry point is called when a network interface is made
592
* active by the system (IFF_UP). At this point all resources needed
593
* for transmit and receive operations are allocated, the interrupt
594
* handler is registered with the OS, the watchdog timer is started,
595
* and the stack is notified that the interface is ready.
597
static int bdx_open(struct net_device *ndev)
599
struct bdx_priv *priv;
603
priv = netdev_priv(ndev);
605
if (netif_running(ndev))
606
netif_stop_queue(priv->ndev);
608
if ((rc = bdx_tx_init(priv)) ||
609
(rc = bdx_rx_init(priv)) ||
610
(rc = bdx_fw_load(priv)))
613
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
615
rc = bdx_hw_start(priv);
619
napi_enable(&priv->napi);
621
print_fw_id(priv->nic);
630
static int bdx_range_check(struct bdx_priv *priv, u32 offset)
632
return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
636
static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
638
struct bdx_priv *priv = netdev_priv(ndev);
644
DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
645
if (cmd != SIOCDEVPRIVATE) {
646
error = copy_from_user(data, ifr->ifr_data, sizeof(data));
648
pr_err("can't copy from user\n");
651
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
654
if (!capable(CAP_SYS_RAWIO))
660
error = bdx_range_check(priv, data[1]);
663
data[2] = READ_REG(priv, data[1]);
664
DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
666
error = copy_to_user(ifr->ifr_data, data, sizeof(data));
672
error = bdx_range_check(priv, data[1]);
675
WRITE_REG(priv, data[1], data[2]);
676
DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
685
static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
688
if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
689
RET(bdx_ioctl_priv(ndev, ifr, cmd));
695
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
696
* by passing VLAN filter table to hardware
697
* @ndev network device
699
* @enable add (1) or kill (0) operation
701
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
703
struct bdx_priv *priv = netdev_priv(ndev);
707
DBG2("vid=%d value=%d\n", (int)vid, enable);
708
if (unlikely(vid >= 4096)) {
709
pr_err("invalid VID: %u (> 4096)\n", vid);
712
reg = regVLAN_0 + (vid / 32) * 4;
714
val = READ_REG(priv, reg);
715
DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
720
DBG2("new val %x\n", val);
721
WRITE_REG(priv, reg, val);
726
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
727
* @ndev network device
728
* @vid VLAN vid to add
730
static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
732
__bdx_vlan_rx_vid(ndev, vid, 1);
736
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
737
* @ndev network device
738
* @vid VLAN vid to kill
740
static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
742
__bdx_vlan_rx_vid(ndev, vid, 0);
746
* bdx_vlan_rx_register - kernel hook for adding VLAN group
747
* @ndev network device
751
bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
753
struct bdx_priv *priv = netdev_priv(ndev);
756
DBG("device='%s', group='%p'\n", ndev->name, grp);
762
* bdx_change_mtu - Change the Maximum Transfer Unit
763
* @netdev: network interface device structure
764
* @new_mtu: new value for maximum frame size
766
* Returns 0 on success, negative on failure
768
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
772
if (new_mtu == ndev->mtu)
775
/* enforce minimum frame size */
776
if (new_mtu < ETH_ZLEN) {
777
netdev_err(ndev, "mtu %d is less then minimal %d\n",
783
if (netif_running(ndev)) {
790
static void bdx_setmulti(struct net_device *ndev)
792
struct bdx_priv *priv = netdev_priv(ndev);
795
GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
799
/* IMF - imperfect (hash) rx multicast filter */
800
/* PMF - perfect rx multicast filter */
802
/* FIXME: RXE(OFF) */
803
if (ndev->flags & IFF_PROMISC) {
804
rxf_val |= GMAC_RX_FILTER_PRM;
805
} else if (ndev->flags & IFF_ALLMULTI) {
806
/* set IMF to accept all multicast frames */
807
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
808
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
809
} else if (!netdev_mc_empty(ndev)) {
811
struct netdev_hw_addr *ha;
814
/* set IMF to deny all multicast frames */
815
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
816
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
817
/* set PMF to deny all multicast frames */
818
for (i = 0; i < MAC_MCST_NUM; i++) {
819
WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
820
WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
823
/* use PMF to accept first MAC_MCST_NUM (15) addresses */
824
/* TBD: sort addresses and write them in ascending order
825
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
826
* multicast frames through IMF */
827
/* accept the rest of addresses through IMF */
828
netdev_for_each_mc_addr(ha, ndev) {
830
for (i = 0; i < ETH_ALEN; i++)
832
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
833
val = READ_REG(priv, reg);
834
val |= (1 << (hash % 32));
835
WRITE_REG(priv, reg, val);
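/* each hash value selects one bit out of MAC_MCST_HASH_NUM 32-bit
 * registers: hash >> 5 picks the register, hash % 32 picks the bit
 * inside it (e.g. hash 0x47 sets bit 7 of the third HASH register) */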
839
DBG("only own mac %d\n", netdev_mc_count(ndev));
840
rxf_val |= GMAC_RX_FILTER_AB;
842
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
848
static int bdx_set_mac(struct net_device *ndev, void *p)
850
struct bdx_priv *priv = netdev_priv(ndev);
851
struct sockaddr *addr = p;
855
if (netif_running(ndev))
858
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
859
bdx_restore_mac(ndev, priv);
863
static int bdx_read_mac(struct bdx_priv *priv)
865
u16 macAddress[3], i;
868
macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
869
macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
870
macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
871
macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
872
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
873
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
874
for (i = 0; i < 3; i++) {
875
priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
876
priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
881
static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
885
val = READ_REG(priv, reg);
886
val |= ((u64) READ_REG(priv, reg + 8)) << 32;
890
/* Do the statistics-update work */
891
static void bdx_update_stats(struct bdx_priv *priv)
893
struct bdx_stats *stats = &priv->hw_stats;
894
u64 *stats_vector = (u64 *) stats;
898
/*Fill HW structure */
900
/*First 12 statistics - 0x7200 - 0x72B0 */
901
for (i = 0; i < 12; i++) {
902
stats_vector[i] = bdx_read_l2stat(priv, addr);
905
BDX_ASSERT(addr != 0x72C0);
906
/* 0x72C0-0x72E0 RSRV */
908
for (; i < 16; i++) {
909
stats_vector[i] = bdx_read_l2stat(priv, addr);
912
BDX_ASSERT(addr != 0x7330);
913
/* 0x7330-0x7360 RSRV */
915
for (; i < 19; i++) {
916
stats_vector[i] = bdx_read_l2stat(priv, addr);
919
BDX_ASSERT(addr != 0x73A0);
920
/* 0x73A0-0x73B0 RSRV */
922
for (; i < 23; i++) {
923
stats_vector[i] = bdx_read_l2stat(priv, addr);
926
BDX_ASSERT(addr != 0x7400);
927
BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
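/* the loop above walks struct bdx_stats as a flat u64 vector while 'addr'
 * advances through the L2 counter registers, skipping the reserved gaps
 * noted in the comments; the BDX_ASSERTs check that both stay in step */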
930
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
932
static void print_rxfd(struct rxf_desc *rxfd);
934
/*************************************************************************
936
*************************************************************************/
938
static void bdx_rxdb_destroy(struct rxdb *db)
943
static struct rxdb *bdx_rxdb_create(int nelem)
948
db = vmalloc(sizeof(struct rxdb)
949
+ (nelem * sizeof(int))
950
+ (nelem * sizeof(struct rx_map)));
951
if (likely(db != NULL)) {
952
db->stack = (int *)(db + 1);
953
db->elems = (void *)(db->stack + nelem);
956
for (i = 0; i < nelem; i++)
957
db->stack[i] = nelem - i - 1; /* to make first allocs
964
static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
966
BDX_ASSERT(db->top <= 0);
967
return db->stack[--(db->top)];
970
static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
972
BDX_ASSERT((n < 0) || (n >= db->nelem));
973
return db->elems + n;
976
static inline int bdx_rxdb_available(struct rxdb *db)
981
static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
983
BDX_ASSERT((n >= db->nelem) || (n < 0));
984
db->stack[(db->top)++] = n;
987
/*************************************************************************
989
*************************************************************************/
991
/* bdx_rx_init - initialize RX all related HW and SW resources
992
* @priv - NIC private structure
994
* Returns 0 on success, negative value on failure
996
* It creates rxf and rxd fifos, update relevant HW registers, preallocate
997
* skb for rx. It assumes that Rx is disabled in HW
998
* funcs are grouped for better cache usage
1000
* RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
* filled and packets will be dropped by the nic without getting into the host or
* causing an interrupt. Anyway, in that condition, the host has no chance to process
* all packets, but dropping them in the nic is cheaper, since it takes 0 cpu cycles
1006
/* TBD: ensure proper packet size */
1008
static int bdx_rx_init(struct bdx_priv *priv)
1012
if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
1013
regRXD_CFG0_0, regRXD_CFG1_0,
1014
regRXD_RPTR_0, regRXD_WPTR_0))
1016
if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
1017
regRXF_CFG0_0, regRXF_CFG1_0,
1018
regRXF_RPTR_0, regRXF_WPTR_0))
1020
priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
1021
sizeof(struct rxf_desc));
1025
priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
1029
netdev_err(priv->ndev, "Rx init failed\n");
1033
/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
1034
* @priv - NIC private structure
1037
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1040
struct rxdb *db = priv->rxdb;
1044
DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
1045
db->nelem - bdx_rxdb_available(db));
1046
while (bdx_rxdb_available(db) > 0) {
1047
i = bdx_rxdb_alloc_elem(db);
1048
dm = bdx_rxdb_addr_elem(db, i);
1051
for (i = 0; i < db->nelem; i++) {
1052
dm = bdx_rxdb_addr_elem(db, i);
1054
pci_unmap_single(priv->pdev,
1055
dm->dma, f->m.pktsz,
1056
PCI_DMA_FROMDEVICE);
1057
dev_kfree_skb(dm->skb);
1062
/* bdx_rx_free - release all Rx resources
1063
* @priv - NIC private structure
1064
* It assumes that Rx is disabled in HW
1066
static void bdx_rx_free(struct bdx_priv *priv)
1070
bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
1071
bdx_rxdb_destroy(priv->rxdb);
1074
bdx_fifo_free(priv, &priv->rxf_fifo0.m);
1075
bdx_fifo_free(priv, &priv->rxd_fifo0.m);
1080
/*************************************************************************
1082
*************************************************************************/
1084
/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
1085
* @priv - nic's private structure
1086
* @f - RXF fifo that needs skbs
1087
* It allocates skbs, builds rxf descriptors and pushes them into the rxf fifo.
1088
* skb's virtual and physical addresses are stored in skb db.
1089
* To calculate free space, func uses cached values of RPTR and WPTR
1090
* When needed, it also updates RPTR and WPTR.
1093
/* TBD: do not update WPTR if no desc were written */
1095
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1097
struct sk_buff *skb;
1098
struct rxf_desc *rxfd;
1100
int dno, delta, idx;
1101
struct rxdb *db = priv->rxdb;
1104
dno = bdx_rxdb_available(db) - 1;
1106
skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
1108
pr_err("NO MEM: dev_alloc_skb failed\n");
1111
skb->dev = priv->ndev;
1112
skb_reserve(skb, NET_IP_ALIGN);
1114
idx = bdx_rxdb_alloc_elem(db);
1115
dm = bdx_rxdb_addr_elem(db, idx);
1116
dm->dma = pci_map_single(priv->pdev,
1117
skb->data, f->m.pktsz,
1118
PCI_DMA_FROMDEVICE);
1120
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1121
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
1123
rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1124
rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1125
rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1128
f->m.wptr += sizeof(struct rxf_desc);
1129
delta = f->m.wptr - f->m.memsz;
1130
if (unlikely(delta >= 0)) {
1133
memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1134
DBG("wrapped descriptor\n");
1139
/*TBD: to do - delayed rxf wptr like in txd */
1140
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1145
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
1146
struct sk_buff *skb)
1149
DBG("rxdd->flags.bits.vtag=%d vlgrp=%p\n", GET_RXD_VTAG(rxd_val1),
1151
if (priv->vlgrp && GET_RXD_VTAG(rxd_val1)) {
1152
DBG("%s: vlan rcv vlan '%x' vtag '%x', device name '%s'\n",
1154
GET_RXD_VLAN_ID(rxd_vlan),
1155
GET_RXD_VTAG(rxd_val1),
1156
vlan_group_get_device(priv->vlgrp,
1157
GET_RXD_VLAN_ID(rxd_vlan))->name);
1158
/* NAPI variant of receive functions */
1159
vlan_hwaccel_receive_skb(skb, priv->vlgrp,
1160
GET_RXD_VLAN_TCI(rxd_vlan));
1162
netif_receive_skb(skb);
1166
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1168
struct rxf_desc *rxfd;
1172
struct sk_buff *skb;
1176
DBG("priv=%p rxdd=%p\n", priv, rxdd);
1177
f = &priv->rxf_fifo0;
1179
DBG("db=%p f=%p\n", db, f);
1180
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1183
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1184
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
1185
rxfd->va_lo = rxdd->va_lo;
1186
rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1187
rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1188
rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1191
f->m.wptr += sizeof(struct rxf_desc);
1192
delta = f->m.wptr - f->m.memsz;
1193
if (unlikely(delta >= 0)) {
1196
memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1197
DBG("wrapped descriptor\n");
1203
/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
1204
* NOTE: special treatment is given to non-contiguous descriptors
* that start near the end of the fifo, wrap around and continue at the beginning. The second
* part is copied right after the first, and then the descriptor is interpreted as
* normal. The fifo has extra space at its end to allow such operations
1208
* @priv - nic's private structure
1209
* @f - RXD fifo to read packets from
1212
/* TBD: replace memcpy func call by explicit inline asm */
1214
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1216
struct net_device *ndev = priv->ndev;
1217
struct sk_buff *skb, *skb2;
1218
struct rxd_desc *rxdd;
1220
struct rxf_fifo *rxf_fifo;
1223
int max_done = BDX_MAX_RX_DONE;
1224
struct rxdb *db = NULL;
1225
/* Unmarshalled descriptor - copy of descriptor in host order */
1233
f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;
1235
size = f->m.wptr - f->m.rptr;
1237
size = f->m.memsz + size; /* size is negative :-) */
1241
rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
1242
rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
1244
len = CPU_CHIP_SWAP16(rxdd->len);
1246
rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);
1248
print_rxdd(rxdd, rxd_val1, len, rxd_vlan);
1250
tmp_len = GET_RXD_BC(rxd_val1) << 3;
1251
BDX_ASSERT(tmp_len <= 0);
1253
if (size < 0) /* test for partially arrived descriptor */
1256
f->m.rptr += tmp_len;
1258
tmp_len = f->m.rptr - f->m.memsz;
1259
if (unlikely(tmp_len >= 0)) {
1260
f->m.rptr = tmp_len;
1262
DBG("wrapped desc rptr=%d tmp_len=%d\n",
1263
f->m.rptr, tmp_len);
1264
memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
1268
if (unlikely(GET_RXD_ERR(rxd_val1))) {
1269
DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
1270
ndev->stats.rx_errors++;
1271
bdx_recycle_skb(priv, rxdd);
1275
rxf_fifo = &priv->rxf_fifo0;
1277
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1280
if (len < BDX_COPYBREAK &&
1281
(skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
1282
skb_reserve(skb2, NET_IP_ALIGN);
1283
/*skb_put(skb2, len); */
1284
pci_dma_sync_single_for_cpu(priv->pdev,
1285
dm->dma, rxf_fifo->m.pktsz,
1286
PCI_DMA_FROMDEVICE);
1287
memcpy(skb2->data, skb->data, len);
1288
bdx_recycle_skb(priv, rxdd);
1291
pci_unmap_single(priv->pdev,
1292
dm->dma, rxf_fifo->m.pktsz,
1293
PCI_DMA_FROMDEVICE);
1294
bdx_rxdb_free_elem(db, rxdd->va_lo);
1297
ndev->stats.rx_bytes += len;
1300
skb->protocol = eth_type_trans(skb, ndev);
1302
/* Non-IP packets aren't checksum-offloaded */
1303
if (GET_RXD_PKT_ID(rxd_val1) == 0)
1304
skb_checksum_none_assert(skb);
1306
skb->ip_summed = CHECKSUM_UNNECESSARY;
1308
NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
1310
if (++done >= max_done)
1314
ndev->stats.rx_packets += done;
1316
/* FIXME: do smth to minimize pci accesses */
1317
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1319
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
1324
/*************************************************************************
1325
* Debug / Temporary Code *
1326
*************************************************************************/
1327
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1330
DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
1331
GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
1332
GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
1333
GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
1334
GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
1335
GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
1339
static void print_rxfd(struct rxf_desc *rxfd)
1341
DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
1342
"info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1343
rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1347
* TX HW/SW interaction overview
1348
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1349
* There are 2 types of TX communication channels between driver and NIC.
1350
* 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
1351
* 2) TX Data Fifo - TXD - holds descriptors of full buffers.
1353
* Currently the NIC supports TSO, checksumming and gather DMA
1354
* UFO and IP fragmentation are on the way
1356
* TX SW Data Structures
1357
* ~~~~~~~~~~~~~~~~~~~~~
1358
* txdb - used to keep track of all skbs owned by SW and their dma addresses.
1359
* For the TX case, ownership lasts from getting the packet via hard_xmit until HW
1360
* acknowledges the send via TXF descriptors.
1361
* Implemented as cyclic buffer.
1362
* fifo - keeps info about fifo's size and location, relevant HW registers,
1363
* usage and skb db. Each TXD and TXF Fifo has its own fifo structure.
1364
* Implemented as simple struct.
1366
* TX SW Execution Flow
1367
* ~~~~~~~~~~~~~~~~~~~~
1368
* OS calls driver's hard_xmit method with a packet to send.
1369
* Driver creates DMA mappings, builds TXD descriptors and kicks HW
1370
* by updating TXD WPTR.
1371
* When the packet is sent, HW writes a TXF descriptor and SW frees the original skb.
1372
* To prevent TXD fifo overflow without reading HW registers every time,
1373
* SW deploys "tx level" technique.
1374
* Upon start up, tx level is initialized to TXD fifo length.
* For every sent packet, SW gets its TXD descriptor size
* (from a precalculated array) and subtracts it from tx level.
1377
* The size is also stored in txdb. When a TXF ack arrives, SW fetches the size of
* the original TXD descriptor from txdb and adds it back to tx level.
1379
* When Tx level drops below a predefined threshold, the driver
1380
* stops the TX queue. When TX level rises above that level,
1381
* the tx queue is enabled again.
1383
* This technique avoids excessive reading of RPTR and WPTR registers.
1384
* As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
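*
* A minimal sketch of that bookkeeping (illustrative only; the constants and
* arrays are the ones used by bdx_tx_transmit/bdx_tx_cleanup below):
*
*   on xmit:     tx_level -= txd_sizes[nr_frags].bytes;
*                if (tx_level < BDX_MIN_TX_LEVEL)
*                        netif_stop_queue(ndev);
*   on TXF ack:  tx_level += size saved in txdb (stored there as -len);
*                if (queue stopped && tx_level >= BDX_MIN_TX_LEVEL)
*                        netif_wake_queue(ndev);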
1387
/*************************************************************************
1389
*************************************************************************/
1390
static inline int bdx_tx_db_size(struct txdb *db)
1392
int taken = db->wptr - db->rptr;
1394
taken = db->size + 1 + taken; /* (size + 1) equals memsz */
1396
return db->size - taken;
1399
/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
1401
* @ptr - read or write pointer
1403
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1405
BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */
1407
BDX_ASSERT(*pptr != db->rptr && /* expect either read */
1408
*pptr != db->wptr); /* or write pointer */
1410
BDX_ASSERT(*pptr < db->start || /* pointer has to be */
1411
*pptr >= db->end); /* in range */
1414
if (unlikely(*pptr == db->end))
1418
/* bdx_tx_db_inc_rptr - increment read pointer
1421
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1423
BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */
1424
__bdx_tx_db_ptr_next(db, &db->rptr);
1427
/* bdx_tx_db_inc_wptr - increment write pointer
1430
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1432
__bdx_tx_db_ptr_next(db, &db->wptr);
1433
BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as
1434
a result of write */
1437
/* bdx_tx_db_init - creates and initializes tx db
1439
* @sz_type - size of tx fifo
1440
* Returns 0 on success, error code otherwise
1442
static int bdx_tx_db_init(struct txdb *d, int sz_type)
1444
int memsz = FIFO_SIZE * (1 << (sz_type + 1));
1446
d->start = vmalloc(memsz);
1451
* In order to differentiate between the db-empty and db-full
* states, at least one element should always be left unused, so that
* rptr == wptr is only ever produced by an empty db
1455
d->size = memsz / sizeof(struct tx_map) - 1;
1456
d->end = d->start + d->size + 1; /* just after last element */
1458
/* all dbs are created equally empty */
1465
/* bdx_tx_db_close - closes tx db and frees all memory
1468
static void bdx_tx_db_close(struct txdb *d)
1470
BDX_ASSERT(d == NULL);
1476
/*************************************************************************
1478
*************************************************************************/
1480
/* sizes of tx desc (including padding if needed) as function
1481
* of skb's frag number */
1484
u16 qwords; /* qword = 64 bit */
1485
} txd_sizes[MAX_SKB_FRAGS + 1];
1487
/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
1488
* @priv - NIC private structure
1489
* @skb - socket buffer to map
1491
* It makes dma mappings for skb's data blocks and writes them to PBL of
1492
* new tx descriptor. It also stores them in the tx db, so they could be
1493
* unmapped after the data was sent. It is the responsibility of the caller to make
* sure that there is enough space in the tx db. The last element holds a pointer
* to the skb itself and is marked with a negative length
1498
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
1499
struct txd_desc *txdd)
1501
struct txdb *db = &priv->txdb;
1502
struct pbl *pbl = &txdd->pbl[0];
1503
int nr_frags = skb_shinfo(skb)->nr_frags;
1506
db->wptr->len = skb_headlen(skb);
1507
db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
1508
db->wptr->len, PCI_DMA_TODEVICE);
1509
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1510
pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1511
pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1512
DBG("=== pbl len: 0x%x ================\n", pbl->len);
1513
DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
1514
DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
1515
bdx_tx_db_inc_wptr(db);
1517
for (i = 0; i < nr_frags; i++) {
1518
struct skb_frag_struct *frag;
1520
frag = &skb_shinfo(skb)->frags[i];
1521
db->wptr->len = frag->size;
1522
db->wptr->addr.dma =
1523
pci_map_page(priv->pdev, frag->page, frag->page_offset,
1524
frag->size, PCI_DMA_TODEVICE);
1527
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1528
pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1529
pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1530
bdx_tx_db_inc_wptr(db);
1533
/* add skb clean up info. */
1534
db->wptr->len = -txd_sizes[nr_frags].bytes;
1535
db->wptr->addr.skb = skb;
1536
bdx_tx_db_inc_wptr(db);
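/* resulting txdb layout for one packet: one entry per DMA mapping (head plus
 * each frag, positive len), then a terminating entry whose len is the
 * negative TXD descriptor size and whose addr holds the skb pointer;
 * bdx_tx_cleanup() later uses it to unmap the fragments, credit tx_level
 * and free the skb */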
1539
/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags
1540
* the number of frags is used as an index to fetch the correct descriptor size,
1541
* instead of calculating it each time */
1542
static void __init init_txd_sizes(void)
1546
/* 7 - is number of lwords in txd with one phys buffer
1547
* 3 - is number of lwords used for every additional phys buffer */
1548
for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1549
lwords = 7 + (i * 3);
1551
lwords++; /* pad it with 1 lword */
1552
txd_sizes[i].qwords = lwords >> 1;
1553
txd_sizes[i].bytes = lwords << 2;
1557
/* bdx_tx_init - initialize all Tx related stuff.
1558
* Namely, TXD and TXF fifos, database etc */
1559
static int bdx_tx_init(struct bdx_priv *priv)
1561
if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
1563
regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
1565
if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
1567
regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
1570
/* The TX db has to keep mappings for all packets sent (on TxD)
1571
* and not yet reclaimed (on TxF) */
1572
if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
1575
priv->tx_level = BDX_MAX_TX_LEVEL;
1576
#ifdef BDX_DELAY_WPTR
1577
priv->tx_update_mark = priv->tx_level - 1024;
1582
netdev_err(priv->ndev, "Tx init failed\n");
1587
* bdx_tx_space - calculates available space in TX fifo
1588
* @priv - NIC private structure
1589
* Returns available space in TX fifo in bytes
1591
static inline int bdx_tx_space(struct bdx_priv *priv)
1593
struct txd_fifo *f = &priv->txd_fifo0;
1596
f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
1597
fsize = f->m.rptr - f->m.wptr;
1599
fsize = f->m.memsz + fsize;
1603
/* bdx_tx_transmit - send packet to NIC
1604
* @skb - packet to send
1605
* ndev - network device assigned to NIC
1607
* o NETDEV_TX_OK everything ok.
1608
* o NETDEV_TX_BUSY Cannot transmit packet, try later
1609
* Usually a bug, means queue start/stop flow control is broken in
1610
* the driver. Note: the driver must NOT put the skb in its DMA ring.
1611
* o NETDEV_TX_LOCKED Locking failed, please retry quickly.
1613
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1614
struct net_device *ndev)
1616
struct bdx_priv *priv = netdev_priv(ndev);
1617
struct txd_fifo *f = &priv->txd_fifo0;
1618
int txd_checksum = 7; /* full checksum */
1620
int txd_vlan_id = 0;
1624
int nr_frags = skb_shinfo(skb)->nr_frags;
1625
struct txd_desc *txdd;
1627
unsigned long flags;
1630
local_irq_save(flags);
1631
if (!spin_trylock(&priv->tx_lock)) {
1632
local_irq_restore(flags);
1633
DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
1634
BDX_DRV_NAME, ndev->name);
1635
return NETDEV_TX_LOCKED;
1638
/* build tx descriptor */
1639
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
1640
txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
1641
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
1644
if (skb_shinfo(skb)->gso_size) {
1645
txd_mss = skb_shinfo(skb)->gso_size;
1647
DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
1651
if (vlan_tx_tag_present(skb)) {
1652
/*Cut VLAN ID to 12 bits */
1653
txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
1657
txdd->length = CPU_CHIP_SWAP16(skb->len);
1658
txdd->mss = CPU_CHIP_SWAP16(txd_mss);
1660
CPU_CHIP_SWAP32(TXD_W1_VAL
1661
(txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
1662
txd_lgsnd, txd_vlan_id));
1663
DBG("=== TxD desc =====================\n");
1664
DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
1665
DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);
1667
bdx_tx_map_skb(priv, skb, txdd);
1669
/* increment TXD write pointer. In case of
1670
fifo wrapping copy remainder of the descriptor
1672
f->m.wptr += txd_sizes[nr_frags].bytes;
1673
len = f->m.wptr - f->m.memsz;
1674
if (unlikely(len >= 0)) {
1677
BDX_ASSERT(len > f->m.memsz);
1678
memcpy(f->m.va, f->m.va + f->m.memsz, len);
1681
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */
1683
priv->tx_level -= txd_sizes[nr_frags].bytes;
1684
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1685
#ifdef BDX_DELAY_WPTR
1686
if (priv->tx_level > priv->tx_update_mark) {
1687
/* Force memory writes to complete before letting h/w
1688
know there are new descriptors to fetch.
1689
(might be needed on platforms like IA64)
1691
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1693
if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
1695
WRITE_REG(priv, f->m.reg_WPTR,
1696
f->m.wptr & TXF_WPTR_WR_PTR);
1700
/* Force memory writes to complete before letting h/w
1701
know there are new descriptors to fetch.
1702
(might be needed on platforms like IA64)
1704
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1708
ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1710
ndev->stats.tx_packets++;
1711
ndev->stats.tx_bytes += skb->len;
1713
if (priv->tx_level < BDX_MIN_TX_LEVEL) {
1714
DBG("%s: %s: TX Q STOP level %d\n",
1715
BDX_DRV_NAME, ndev->name, priv->tx_level);
1716
netif_stop_queue(ndev);
1719
spin_unlock_irqrestore(&priv->tx_lock, flags);
1720
return NETDEV_TX_OK;
1723
/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
1724
* @priv - bdx adapter
1725
* It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
1726
* that those packets were sent
1728
static void bdx_tx_cleanup(struct bdx_priv *priv)
1730
struct txf_fifo *f = &priv->txf_fifo0;
1731
struct txdb *db = &priv->txdb;
1735
f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
1736
BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */
1738
while (f->m.wptr != f->m.rptr) {
1739
f->m.rptr += BDX_TXF_DESC_SZ;
1740
f->m.rptr &= f->m.size_mask;
1742
/* unmap all the fragments */
1743
/* first has to come tx_maps containing dma */
1744
BDX_ASSERT(db->rptr->len == 0);
1746
BDX_ASSERT(db->rptr->addr.dma == 0);
1747
pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1748
db->rptr->len, PCI_DMA_TODEVICE);
1749
bdx_tx_db_inc_rptr(db);
1750
} while (db->rptr->len > 0);
1751
tx_level -= db->rptr->len; /* '-' koz len is negative */
1753
/* now should come skb pointer - free it */
1754
dev_kfree_skb_irq(db->rptr->addr.skb);
1755
bdx_tx_db_inc_rptr(db);
1758
/* let h/w know which TXF descriptors were cleaned */
1759
BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
1760
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1762
/* We reclaimed resources, so in case the Q is stopped by xmit callback,
1763
* we resume the transmission and use tx_lock to synchronize with xmit. */
1764
spin_lock(&priv->tx_lock);
1765
priv->tx_level += tx_level;
1766
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1767
#ifdef BDX_DELAY_WPTR
1768
if (priv->tx_noupd) {
1770
WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
1771
priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
1775
if (unlikely(netif_queue_stopped(priv->ndev) &&
1776
netif_carrier_ok(priv->ndev) &&
1777
(priv->tx_level >= BDX_MIN_TX_LEVEL))) {
1778
DBG("%s: %s: TX Q WAKE level %d\n",
1779
BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
1780
netif_wake_queue(priv->ndev);
1782
spin_unlock(&priv->tx_lock);
1785
/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
1786
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
1788
static void bdx_tx_free_skbs(struct bdx_priv *priv)
1790
struct txdb *db = &priv->txdb;
1793
while (db->rptr != db->wptr) {
1794
if (likely(db->rptr->len))
1795
pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1796
db->rptr->len, PCI_DMA_TODEVICE);
1798
dev_kfree_skb(db->rptr->addr.skb);
1799
bdx_tx_db_inc_rptr(db);
1804
/* bdx_tx_free - frees all Tx resources */
1805
static void bdx_tx_free(struct bdx_priv *priv)
1808
bdx_tx_free_skbs(priv);
1809
bdx_fifo_free(priv, &priv->txd_fifo0.m);
1810
bdx_fifo_free(priv, &priv->txf_fifo0.m);
1811
bdx_tx_db_close(&priv->txdb);
1814
/* bdx_tx_push_desc - push descriptor to TxD fifo
1815
* @priv - NIC private structure
1816
* @data - desc's data
1817
* @size - desc's size
1819
* Pushes the desc into the TxD fifo, wrapping it around the fifo end if needed.
1820
* NOTE: this func does not check for available space. this is responsibility
1821
* of the caller. Neither does it check that the data size is smaller than the fifo size.
1824
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1826
struct txd_fifo *f = &priv->txd_fifo0;
1827
int i = f->m.memsz - f->m.wptr;
1833
memcpy(f->m.va + f->m.wptr, data, size);
1836
memcpy(f->m.va + f->m.wptr, data, i);
1837
f->m.wptr = size - i;
1838
memcpy(f->m.va, data + i, f->m.wptr);
1840
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1843
/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
1844
* @priv - NIC private structure
1845
* @data - desc's data
1846
* @size - desc's size
1848
* NOTE: this func does check for available space and, if necessary, waits for
1849
* NIC to read existing data before writing new one.
1851
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1857
/* we subtract 8 because when the fifo is full rptr == wptr,
which also means that the fifo is empty; we can tell
the difference, but could hw do the same ??? :) */
1860
int avail = bdx_tx_space(priv) - 8;
1862
if (timer++ > 300) { /* prevent endless loop */
1863
DBG("timeout while writing desc to TxD fifo\n");
1866
udelay(50); /* give hw a chance to clean fifo */
1869
avail = min(avail, size);
1870
DBG("about to push %d bytes starting %p size %d\n", avail,
1872
bdx_tx_push_desc(priv, data, avail);
1879
static const struct net_device_ops bdx_netdev_ops = {
1880
.ndo_open = bdx_open,
1881
.ndo_stop = bdx_close,
1882
.ndo_start_xmit = bdx_tx_transmit,
1883
.ndo_validate_addr = eth_validate_addr,
1884
.ndo_do_ioctl = bdx_ioctl,
1885
.ndo_set_multicast_list = bdx_setmulti,
1886
.ndo_change_mtu = bdx_change_mtu,
1887
.ndo_set_mac_address = bdx_set_mac,
1888
.ndo_vlan_rx_register = bdx_vlan_rx_register,
1889
.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
1890
.ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
1894
* bdx_probe - Device Initialization Routine
1895
* @pdev: PCI device information struct
1896
* @ent: entry in bdx_pci_tbl
1898
* Returns 0 on success, negative on failure
1900
* bdx_probe initializes an adapter identified by a pci_dev structure.
1901
* The OS initialization, configuring of the adapter private structure,
1902
* and a hardware reset occur.
1904
* functions and their order used as explained in
1905
* /usr/src/linux/Documentation/DMA-{API,mapping}.txt
1909
/* TBD: netif_msg should be checked and implemented. I disable it for now */
1910
static int __devinit
1911
bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1913
struct net_device *ndev;
1914
struct bdx_priv *priv;
1915
int err, pci_using_dac, port;
1916
unsigned long pciaddr;
1918
struct pci_nic *nic;
1922
nic = vmalloc(sizeof(*nic));
1926
/************** pci *****************/
1927
err = pci_enable_device(pdev);
1928
if (err) /* it triggers interrupt, dunno why. */
1929
goto err_pci; /* it's not a problem though */
1931
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1932
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1935
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1936
(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1937
pr_err("No usable DMA configuration, aborting\n");
1943
err = pci_request_regions(pdev, BDX_DRV_NAME);
1947
pci_set_master(pdev);
1949
pciaddr = pci_resource_start(pdev, 0);
1952
pr_err("no MMIO resource\n");
1955
regionSize = pci_resource_len(pdev, 0);
1956
if (regionSize < BDX_REGS_SIZE) {
1958
pr_err("MMIO resource (%x) too small\n", regionSize);
1962
nic->regs = ioremap(pciaddr, regionSize);
1965
pr_err("ioremap failed\n");
1969
if (pdev->irq < 2) {
1971
pr_err("invalid irq (%d)\n", pdev->irq);
1974
pci_set_drvdata(pdev, nic);
1976
if (pdev->device == 0x3014)
1983
bdx_hw_reset_direct(nic->regs);
1985
nic->irq_type = IRQ_INTX;
1987
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1988
err = pci_enable_msi(pdev);
1990
pr_err("Can't eneble msi. error is %d\n", err);
1992
nic->irq_type = IRQ_MSI;
1994
DBG("HW does not support MSI\n");
1997
/************** netdev **************/
1998
for (port = 0; port < nic->port_num; port++) {
1999
ndev = alloc_etherdev(sizeof(struct bdx_priv));
2002
pr_err("alloc_etherdev failed\n");
2006
ndev->netdev_ops = &bdx_netdev_ops;
2007
ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
2009
bdx_set_ethtool_ops(ndev); /* ethtool interface */
2011
/* these fields are used for info purposes only
2012
* so we can have them the same for all ports of the board */
2013
ndev->if_port = port;
2014
ndev->base_addr = pciaddr;
2015
ndev->mem_start = pciaddr;
2016
ndev->mem_end = pciaddr + regionSize;
2017
ndev->irq = pdev->irq;
2018
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2019
| NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2020
NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
2021
/*| NETIF_F_FRAGLIST */
2023
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2024
NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
2027
ndev->features |= NETIF_F_HIGHDMA;
2029
/************** priv ****************/
2030
priv = nic->priv[port] = netdev_priv(ndev);
2032
priv->pBdxRegs = nic->regs + port * 0x8000;
2037
priv->msg_enable = BDX_DEF_MSG_ENABLE;
2039
netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2041
if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2042
DBG("HW statistics not supported\n");
2043
priv->stats_flag = 0;
2045
priv->stats_flag = 1;
2048
/* Initialize fifo sizes. */
2054
/* Initialize the initial coalescing registers. */
2055
priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2056
priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2058
/* ndev->xmit_lock spinlock is not used.
2059
* Private priv->tx_lock is used for synchronization
2060
* between transmit and TX irq cleanup. In addition
2061
* the set multicast list callback has to use priv->tx_lock.
2064
ndev->features |= NETIF_F_LLTX;
2066
spin_lock_init(&priv->tx_lock);
2068
/*bdx_hw_reset(priv); */
2069
if (bdx_read_mac(priv)) {
2070
pr_err("load MAC address failed\n");
2073
SET_NETDEV_DEV(ndev, &pdev->dev);
2074
err = register_netdev(ndev);
2076
pr_err("register_netdev failed\n");
2079
netif_carrier_off(ndev);
2080
netif_stop_queue(ndev);
2091
pci_release_regions(pdev);
2093
pci_disable_device(pdev);
2100
/****************** Ethtool interface *********************/
2101
/* get strings for statistics counters */
2103
bdx_stat_names[][ETH_GSTRING_LEN] = {
2104
"InUCast", /* 0x7200 */
2105
"InMCast", /* 0x7210 */
2106
"InBCast", /* 0x7220 */
2107
"InPkts", /* 0x7230 */
2108
"InErrors", /* 0x7240 */
2109
"InDropped", /* 0x7250 */
2110
"FrameTooLong", /* 0x7260 */
2111
"FrameSequenceErrors", /* 0x7270 */
2112
"InVLAN", /* 0x7280 */
2113
"InDroppedDFE", /* 0x7290 */
2114
"InDroppedIntFull", /* 0x72A0 */
2115
"InFrameAlignErrors", /* 0x72B0 */
2117
/* 0x72C0-0x72E0 RSRV */
2119
"OutUCast", /* 0x72F0 */
2120
"OutMCast", /* 0x7300 */
2121
"OutBCast", /* 0x7310 */
2122
"OutPkts", /* 0x7320 */
2124
/* 0x7330-0x7360 RSRV */
2126
"OutVLAN", /* 0x7370 */
2127
"InUCastOctects", /* 0x7380 */
2128
"OutUCastOctects", /* 0x7390 */
2130
/* 0x73A0-0x73B0 RSRV */
2132
"InBCastOctects", /* 0x73C0 */
2133
"OutBCastOctects", /* 0x73D0 */
2134
"InOctects", /* 0x73E0 */
2135
"OutOctects", /* 0x73F0 */
2139
* bdx_get_settings - get device-specific settings
2143
static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2147
struct bdx_priv *priv = netdev_priv(netdev);
2149
rdintcm = priv->rdintcm;
2150
tdintcm = priv->tdintcm;
2152
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2153
ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2154
ethtool_cmd_speed_set(ecmd, SPEED_10000);
2155
ecmd->duplex = DUPLEX_FULL;
2156
ecmd->port = PORT_FIBRE;
2157
ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
2158
ecmd->autoneg = AUTONEG_DISABLE;
2160
/* PCK_TH is measured in multiples of FIFO bytes.
We translate it to packets */
2163
((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2165
((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2171
* bdx_get_drvinfo - report driver information
2176
bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2178
struct bdx_priv *priv = netdev_priv(netdev);
2180
strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
2181
strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
2182
strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2183
strlcat(drvinfo->bus_info, pci_name(priv->pdev),
2184
sizeof(drvinfo->bus_info));
2186
drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2187
drvinfo->testinfo_len = 0;
2188
drvinfo->regdump_len = 0;
2189
drvinfo->eedump_len = 0;
2193
* bdx_get_coalesce - get interrupt coalescing parameters
2198
bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2202
struct bdx_priv *priv = netdev_priv(netdev);
2204
rdintcm = priv->rdintcm;
2205
tdintcm = priv->tdintcm;
2207
/* PCK_TH is measured in multiples of FIFO bytes.
We translate it to packets */
2209
ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
2210
ecoal->rx_max_coalesced_frames =
2211
((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2213
ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
2214
ecoal->tx_max_coalesced_frames =
2215
((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2217
/* adaptive parameters ignored */
2222
* bdx_set_coalesce - set interrupt coalescing parameters
2227
bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2231
struct bdx_priv *priv = netdev_priv(netdev);
2237
/* Check for valid input */
2238
rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
2239
tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
2240
rx_max_coal = ecoal->rx_max_coalesced_frames;
2241
tx_max_coal = ecoal->tx_max_coalesced_frames;
2243
/* Translate from packets to multiples of FIFO bytes */
2245
(((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
2248
(((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
2251
if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
2252
(rx_max_coal > 0xF) || (tx_max_coal > 0xF))
2255
rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
2256
GET_RXF_TH(priv->rdintcm), rx_max_coal);
2257
tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
2260
priv->rdintcm = rdintcm;
2261
priv->tdintcm = tdintcm;
2263
WRITE_REG(priv, regRDINTCM0, rdintcm);
2264
WRITE_REG(priv, regTDINTCM0, tdintcm);
2269
/* Convert RX fifo size to number of pending packets */
2270
static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2272
return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2275
/* Convert TX fifo size to number of pending packets */
2276
static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2278
return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2282
* bdx_get_ringparam - report ring sizes
2287
bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2289
struct bdx_priv *priv = netdev_priv(netdev);
2291
/*max_pending - the maximum-sized FIFO we allow */
2292
ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
2293
ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
2294
ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
2295
ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
2299
* bdx_set_ringparam - set ring sizes
2304
bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2306
struct bdx_priv *priv = netdev_priv(netdev);
2310
for (; rx_size < 4; rx_size++) {
2311
if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2317
for (; tx_size < 4; tx_size++) {
2318
if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2324
/*Is there anything to do? */
2325
if ((rx_size == priv->rxf_size) &&
2326
(tx_size == priv->txd_size))
2329
priv->rxf_size = rx_size;
2331
priv->rxd_size = rx_size - 1;
2333
priv->rxd_size = rx_size;
2335
priv->txf_size = priv->txd_size = tx_size;
2337
if (netif_running(netdev)) {
2345
* bdx_get_strings - return a set of strings that describe the requested objects
2349
static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2351
switch (stringset) {
2353
memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2359
* bdx_get_sset_count - return number of statistics or tests
2362
static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2364
struct bdx_priv *priv = netdev_priv(netdev);
2366
switch (stringset) {
2368
BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2369
!= sizeof(struct bdx_stats) / sizeof(u64));
2370
return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
2377
* bdx_get_ethtool_stats - return device's hardware L2 statistics
2382
static void bdx_get_ethtool_stats(struct net_device *netdev,
2383
struct ethtool_stats *stats, u64 *data)
2385
struct bdx_priv *priv = netdev_priv(netdev);
2387
if (priv->stats_flag) {
2389
/* Update stats from HW */
2390
bdx_update_stats(priv);
2392
/* Copy data to user buffer */
2393
memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
2398
* bdx_set_ethtool_ops - ethtool interface implementation
2401
static void bdx_set_ethtool_ops(struct net_device *netdev)
2403
static const struct ethtool_ops bdx_ethtool_ops = {
2404
.get_settings = bdx_get_settings,
2405
.get_drvinfo = bdx_get_drvinfo,
2406
.get_link = ethtool_op_get_link,
2407
.get_coalesce = bdx_get_coalesce,
2408
.set_coalesce = bdx_set_coalesce,
2409
.get_ringparam = bdx_get_ringparam,
2410
.set_ringparam = bdx_set_ringparam,
2411
.get_strings = bdx_get_strings,
2412
.get_sset_count = bdx_get_sset_count,
2413
.get_ethtool_stats = bdx_get_ethtool_stats,
2416
SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
2420
* bdx_remove - Device Removal Routine
2421
* @pdev: PCI device information struct
2423
* bdx_remove is called by the PCI subsystem to alert the driver
2424
* that it should release a PCI device. This could be caused by a
2425
* Hot-Plug event, or because the driver is going to be removed from
2428
static void __devexit bdx_remove(struct pci_dev *pdev)
2430
struct pci_nic *nic = pci_get_drvdata(pdev);
2431
struct net_device *ndev;
2434
for (port = 0; port < nic->port_num; port++) {
2435
ndev = nic->priv[port]->ndev;
2436
unregister_netdev(ndev);
2440
/*bdx_hw_reset_direct(nic->regs); */
2442
if (nic->irq_type == IRQ_MSI)
2443
pci_disable_msi(pdev);
2447
pci_release_regions(pdev);
2448
pci_disable_device(pdev);
2449
pci_set_drvdata(pdev, NULL);
2455
static struct pci_driver bdx_pci_driver = {
2456
.name = BDX_DRV_NAME,
2457
.id_table = bdx_pci_tbl,
2459
.remove = __devexit_p(bdx_remove),
2463
* print_driver_id - print parameters of the driver build
2465
static void __init print_driver_id(void)
2467
pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
2468
pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
2471
static int __init bdx_module_init(void)
2476
RET(pci_register_driver(&bdx_pci_driver));
2479
module_init(bdx_module_init);
2481
static void __exit bdx_module_exit(void)
2484
pci_unregister_driver(&bdx_pci_driver);
2488
module_exit(bdx_module_exit);
2490
MODULE_LICENSE("GPL");
2491
MODULE_AUTHOR(DRIVER_AUTHOR);
2492
MODULE_DESCRIPTION(BDX_DRV_DESC);
2493
MODULE_FIRMWARE("tehuti/bdx.bin");