 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <linux/module.h>
37
#include <linux/moduleparam.h>
38
#include <linux/init.h>
39
#include <linux/pci.h>
40
#include <linux/dma-mapping.h>
41
#include <linux/netdevice.h>
42
#include <linux/etherdevice.h>
43
#include <linux/debugfs.h>
44
#include <linux/ethtool.h>
46
#include "t4vf_common.h"
47
#include "t4vf_defs.h"
49
#include "../cxgb4/t4_regs.h"
50
#include "../cxgb4/t4_msg.h"
53
* Generic information about the driver.
55
#define DRV_VERSION "1.0.0"
56
#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
64
* Default ethtool "message level" for adapters.
66
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
67
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
68
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
70
static int dflt_msg_enable = DFLT_MSG_ENABLE;
72
module_param(dflt_msg_enable, int, 0644);
73
MODULE_PARM_DESC(dflt_msg_enable,
74
"default adapter ethtool message level bitmap");
77
* The driver uses the best interrupt scheme available on a platform in the
78
* order MSI-X then MSI. This parameter determines which of these schemes the
79
* driver may consider as follows:
81
* msi = 2: choose from among MSI-X and MSI
82
* msi = 1: only consider MSI interrupts
84
* Note that unlike the Physical Function driver, this Virtual Function driver
85
* does _not_ support legacy INTx interrupts (this limitation is mandated by
86
* the PCI-E SR-IOV standard).
90
#define MSI_DEFAULT MSI_MSIX
92
static int msi = MSI_DEFAULT;
94
module_param(msi, int, 0644);
95
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
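/*
 * Rough sketch (an assumption, not the driver's verbatim probe code) of how
 * the "msi" parameter is consumed when interrupts are set up: MSI-X is only
 * attempted when allowed, with MSI as the fallback.
 */
#if 0
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;
	else
		dev_err(&pdev->dev, "unable to allocate MSI-X/MSI interrupts\n");
#endif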
98
* Fundamental constants.
99
* ======================
103
MAX_TXQ_ENTRIES = 16384,
104
MAX_RSPQ_ENTRIES = 16384,
105
MAX_RX_BUFFERS = 16384,
107
MIN_TXQ_ENTRIES = 32,
108
MIN_RSPQ_ENTRIES = 128,
112
* For purposes of manipulating the Free List size we need to
113
* recognize that Free Lists are actually Egress Queues (the host
114
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are all in units of Egress Context Unit bytes, and free
116
* list entries are 64-bit PCI DMA addresses. And since the state of
117
* the Producer Index == the Consumer Index implies an EMPTY list, we
118
* always have at least one Egress Unit's worth of Free List entries
119
* unused. See sge.c for more details ...
121
EQ_UNIT = SGE_EQ_IDXSIZE,
122
FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
123
MIN_FL_RESID = FL_PER_EQ_UNIT,
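	/*
	 * Worked example (assuming SGE_EQ_IDXSIZE is 64 bytes, the usual
	 * Egress Context Unit size): FL_PER_EQ_UNIT = 64 / sizeof(__be64) =
	 * 64 / 8 = 8 Free List entries, so MIN_FL_RESID reserves one full
	 * Egress Unit's worth of entries; a completely full list then never
	 * has Producer Index == Consumer Index, which would otherwise be
	 * indistinguishable from an empty list.
	 */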
127
* Global driver state.
128
* ====================
131
static struct dentry *cxgb4vf_debugfs_root;
134
* OS "Callback" functions.
135
* ========================
139
* The link status has changed on the indicated "port" (Virtual Interface).
141
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
143
struct net_device *dev = adapter->port[pidx];
146
* If the port is disabled or the current recorded "link up"
147
* status matches the new status, just return.
149
if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
153
* Tell the OS that the link status has changed and print a short
154
* informative message on the console about the event.
159
const struct port_info *pi = netdev_priv(dev);
161
netif_carrier_on(dev);
163
switch (pi->link_cfg.speed) {
181
switch (pi->link_cfg.fc) {
190
case PAUSE_RX|PAUSE_TX:
199
printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
202
netif_carrier_off(dev);
203
printk(KERN_INFO "%s: link down\n", dev->name);
208
* Net device operations.
209
* ======================
216
* Perform the MAC and PHY actions needed to enable a "port" (Virtual
219
static int link_start(struct net_device *dev)
222
struct port_info *pi = netdev_priv(dev);
225
	 * We do not set address filters and promiscuity here; the stack does
	 * that step explicitly.  Enable VLAN acceleration.
228
ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
231
ret = t4vf_change_mac(pi->adapter, pi->viid,
232
pi->xact_addr_filt, dev->dev_addr, true);
234
pi->xact_addr_filt = ret;
240
* We don't need to actually "start the link" itself since the
241
* firmware will do that for us when the first Virtual Interface
242
* is enabled on a port.
245
ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
250
* Name the MSI-X interrupts.
252
static void name_msix_vecs(struct adapter *adapter)
254
int namelen = sizeof(adapter->msix_info[0].desc) - 1;
260
snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
261
"%s-FWeventq", adapter->name);
262
adapter->msix_info[MSIX_FW].desc[namelen] = 0;
267
for_each_port(adapter, pidx) {
268
struct net_device *dev = adapter->port[pidx];
269
const struct port_info *pi = netdev_priv(dev);
272
for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
273
snprintf(adapter->msix_info[msi].desc, namelen,
274
"%s-%d", dev->name, qs);
275
adapter->msix_info[msi].desc[namelen] = 0;
281
* Request all of our MSI-X resources.
283
static int request_msix_queue_irqs(struct adapter *adapter)
285
struct sge *s = &adapter->sge;
291
err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
292
0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
300
for_each_ethrxq(s, rxq) {
301
err = request_irq(adapter->msix_info[msi].vec,
302
t4vf_sge_intr_msix, 0,
303
adapter->msix_info[msi].desc,
304
&s->ethrxq[rxq].rspq);
313
free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
314
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
319
* Free our MSI-X resources.
321
static void free_msix_queue_irqs(struct adapter *adapter)
323
struct sge *s = &adapter->sge;
326
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
328
for_each_ethrxq(s, rxq)
329
free_irq(adapter->msix_info[msi++].vec,
330
&s->ethrxq[rxq].rspq);
334
* Turn on NAPI and start up interrupts on a response queue.
336
static void qenable(struct sge_rspq *rspq)
338
napi_enable(&rspq->napi);
341
* 0-increment the Going To Sleep register to start the timer and
344
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
346
SEINTARM(rspq->intr_params) |
347
INGRESSQID(rspq->cntxt_id));
351
* Enable NAPI scheduling and interrupt generation for all Receive Queues.
353
static void enable_rx(struct adapter *adapter)
356
struct sge *s = &adapter->sge;
358
for_each_ethrxq(s, rxq)
359
qenable(&s->ethrxq[rxq].rspq);
360
qenable(&s->fw_evtq);
363
* The interrupt queue doesn't use NAPI so we do the 0-increment of
364
* its Going To Sleep register here to get it started.
366
if (adapter->flags & USING_MSI)
367
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
369
SEINTARM(s->intrq.intr_params) |
370
INGRESSQID(s->intrq.cntxt_id));
375
* Wait until all NAPI handlers are descheduled.
377
static void quiesce_rx(struct adapter *adapter)
379
struct sge *s = &adapter->sge;
382
for_each_ethrxq(s, rxq)
383
napi_disable(&s->ethrxq[rxq].rspq.napi);
384
napi_disable(&s->fw_evtq.napi);
388
* Response queue handler for the firmware event queue.
390
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
391
const struct pkt_gl *gl)
394
* Extract response opcode and get pointer to CPL message body.
396
struct adapter *adapter = rspq->adapter;
397
u8 opcode = ((const struct rss_header *)rsp)->opcode;
398
void *cpl = (void *)(rsp + 1);
403
* We've received an asynchronous message from the firmware.
405
const struct cpl_fw6_msg *fw_msg = cpl;
406
if (fw_msg->type == FW6_TYPE_CMD_RPL)
407
t4vf_handle_fw_rpl(adapter, fw_msg->data);
411
case CPL_SGE_EGR_UPDATE: {
413
* We've received an Egress Queue Status Update message. We
414
* get these, if the SGE is configured to send these when the
415
* firmware passes certain points in processing our TX
416
* Ethernet Queue or if we make an explicit request for one.
417
* We use these updates to determine when we may need to
418
* restart a TX Ethernet Queue which was stopped for lack of
419
* free TX Queue Descriptors ...
421
const struct cpl_sge_egr_update *p = (void *)cpl;
422
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
423
struct sge *s = &adapter->sge;
425
struct sge_eth_txq *txq;
429
* Perform sanity checking on the Queue ID to make sure it
430
* really refers to one of our TX Ethernet Egress Queues which
431
* is active and matches the queue's ID. None of these error
432
* conditions should ever happen so we may want to either make
433
* them fatal and/or conditionalized under DEBUG.
435
eq_idx = EQ_IDX(s, qid);
436
if (unlikely(eq_idx >= MAX_EGRQ)) {
437
dev_err(adapter->pdev_dev,
438
"Egress Update QID %d out of range\n", qid);
441
tq = s->egr_map[eq_idx];
442
if (unlikely(tq == NULL)) {
443
dev_err(adapter->pdev_dev,
444
"Egress Update QID %d TXQ=NULL\n", qid);
447
txq = container_of(tq, struct sge_eth_txq, q);
448
if (unlikely(tq->abs_id != qid)) {
449
dev_err(adapter->pdev_dev,
450
"Egress Update QID %d refers to TXQ %d\n",
456
* Restart a stopped TX Queue which has less than half of its
460
netif_tx_wake_queue(txq->txq);
465
dev_err(adapter->pdev_dev,
466
"unexpected CPL %#x on FW event queue\n", opcode);
473
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
477
static int setup_sge_queues(struct adapter *adapter)
479
struct sge *s = &adapter->sge;
483
* Clear "Queue Set" Free List Starving and TX Queue Mapping Error
486
bitmap_zero(s->starving_fl, MAX_EGRQ);
489
* If we're using MSI interrupt mode we need to set up a "forwarded
490
* interrupt" queue which we'll set up with our MSI vector. The rest
491
* of the ingress queues will be set up to forward their interrupts to
492
* this queue ... This must be first since t4vf_sge_alloc_rxq() uses
493
* the intrq's queue ID as the interrupt forwarding queue for the
494
* subsequent calls ...
496
if (adapter->flags & USING_MSI) {
497
err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
498
adapter->port[0], 0, NULL, NULL);
500
goto err_free_queues;
504
* Allocate our ingress queue for asynchronous firmware messages.
506
err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
507
MSIX_FW, NULL, fwevtq_handler);
509
goto err_free_queues;
512
* Allocate each "port"'s initial Queue Sets. These can be changed
513
* later on ... up to the point where any interface on the adapter is
514
* brought up at which point lots of things get nailed down
518
for_each_port(adapter, pidx) {
519
struct net_device *dev = adapter->port[pidx];
520
struct port_info *pi = netdev_priv(dev);
521
struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
522
struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
525
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
526
err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
528
&rxq->fl, t4vf_ethrx_handler);
530
goto err_free_queues;
532
err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
533
netdev_get_tx_queue(dev, qs),
534
s->fw_evtq.cntxt_id);
536
goto err_free_queues;
539
memset(&rxq->stats, 0, sizeof(rxq->stats));
544
* Create the reverse mappings for the queues.
546
s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
547
s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
548
IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
549
for_each_port(adapter, pidx) {
550
struct net_device *dev = adapter->port[pidx];
551
struct port_info *pi = netdev_priv(dev);
552
struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
553
struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
556
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
557
IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
558
EQ_MAP(s, txq->q.abs_id) = &txq->q;
561
* The FW_IQ_CMD doesn't return the Absolute Queue IDs
562
* for Free Lists but since all of the Egress Queues
563
* (including Free Lists) have Relative Queue IDs
564
* which are computed as Absolute - Base Queue ID, we
565
* can synthesize the Absolute Queue IDs for the Free
566
* Lists. This is useful for debugging purposes when
567
* we want to dump Queue Contexts via the PF Driver.
569
rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
570
EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
576
t4vf_free_sge_resources(adapter);
581
* Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
582
* queues. We configure the RSS CPU lookup table to distribute to the number
583
* of HW receive queues, and the response queue lookup table to narrow that
584
* down to the response queues actually configured for each "port" (Virtual
585
* Interface). We always configure the RSS mapping for all ports since the
586
* mapping table has plenty of entries.
588
static int setup_rss(struct adapter *adapter)
592
for_each_port(adapter, pidx) {
593
struct port_info *pi = adap2pinfo(adapter, pidx);
594
struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
595
u16 rss[MAX_PORT_QSETS];
598
for (qs = 0; qs < pi->nqsets; qs++)
599
rss[qs] = rxq[qs].rspq.abs_id;
601
err = t4vf_config_rss_range(adapter, pi->viid,
602
0, pi->rss_size, rss, pi->nqsets);
607
* Perform Global RSS Mode-specific initialization.
609
switch (adapter->params.rss.mode) {
610
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
612
* If Tunnel All Lookup isn't specified in the global
613
* RSS Configuration, then we need to specify a
614
* default Ingress Queue for any ingress packets which
615
* aren't hashed. We'll use our first ingress queue
618
if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
619
union rss_vi_config config;
620
err = t4vf_read_rss_vi_config(adapter,
625
config.basicvirtual.defaultq =
627
err = t4vf_write_rss_vi_config(adapter,
641
* Bring the adapter up. Called whenever we go from no "ports" open to having
642
* one open. This function performs the actions necessary to make an adapter
643
* operational, such as completing the initialization of HW modules, and
644
* enabling interrupts. Must be called with the rtnl lock held. (Note that
645
* this is called "cxgb_up" in the PF Driver.)
647
static int adapter_up(struct adapter *adapter)
652
* If this is the first time we've been called, perform basic
653
* adapter setup. Once we've done this, many of our adapter
654
* parameters can no longer be changed ...
656
if ((adapter->flags & FULL_INIT_DONE) == 0) {
657
err = setup_sge_queues(adapter);
660
err = setup_rss(adapter);
662
t4vf_free_sge_resources(adapter);
666
if (adapter->flags & USING_MSIX)
667
name_msix_vecs(adapter);
668
adapter->flags |= FULL_INIT_DONE;
672
* Acquire our interrupt resources. We only support MSI-X and MSI.
674
BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
675
if (adapter->flags & USING_MSIX)
676
err = request_msix_queue_irqs(adapter);
678
err = request_irq(adapter->pdev->irq,
679
t4vf_intr_handler(adapter), 0,
680
adapter->name, adapter);
682
dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
688
* Enable NAPI ingress processing and return success.
691
t4vf_sge_start(adapter);
696
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in the PF
700
static void adapter_down(struct adapter *adapter)
703
* Free interrupt resources.
705
if (adapter->flags & USING_MSIX)
706
free_msix_queue_irqs(adapter);
708
free_irq(adapter->pdev->irq, adapter);
711
* Wait for NAPI handlers to finish.
717
* Start up a net device.
719
static int cxgb4vf_open(struct net_device *dev)
722
struct port_info *pi = netdev_priv(dev);
723
struct adapter *adapter = pi->adapter;
726
* If this is the first interface that we're opening on the "adapter",
727
* bring the "adapter" up now.
729
if (adapter->open_device_map == 0) {
730
err = adapter_up(adapter);
736
* Note that this interface is up and start everything up ...
738
netif_set_real_num_tx_queues(dev, pi->nqsets);
739
err = netif_set_real_num_rx_queues(dev, pi->nqsets);
742
err = link_start(dev);
746
netif_tx_start_all_queues(dev);
747
set_bit(pi->port_id, &adapter->open_device_map);
751
if (adapter->open_device_map == 0)
752
adapter_down(adapter);
757
* Shut down a net device. This routine is called "cxgb_close" in the PF
760
static int cxgb4vf_stop(struct net_device *dev)
762
struct port_info *pi = netdev_priv(dev);
763
struct adapter *adapter = pi->adapter;
765
netif_tx_stop_all_queues(dev);
766
netif_carrier_off(dev);
767
t4vf_enable_vi(adapter, pi->viid, false, false);
768
pi->link_cfg.link_ok = 0;
770
clear_bit(pi->port_id, &adapter->open_device_map);
771
if (adapter->open_device_map == 0)
772
adapter_down(adapter);
777
* Translate our basic statistics into the standard "ifconfig" statistics.
779
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
781
struct t4vf_port_stats stats;
782
struct port_info *pi = netdev2pinfo(dev);
783
struct adapter *adapter = pi->adapter;
784
struct net_device_stats *ns = &dev->stats;
787
spin_lock(&adapter->stats_lock);
788
err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
789
spin_unlock(&adapter->stats_lock);
791
memset(ns, 0, sizeof(*ns));
795
ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
796
stats.tx_ucast_bytes + stats.tx_offload_bytes);
797
ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
798
stats.tx_ucast_frames + stats.tx_offload_frames);
799
ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
800
stats.rx_ucast_bytes);
801
ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
802
stats.rx_ucast_frames);
803
ns->multicast = stats.rx_mcast_frames;
804
ns->tx_errors = stats.tx_drop_frames;
805
ns->rx_errors = stats.rx_err_frames;
811
 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
 * at a specified offset within the list, into an array of address pointers and
 * return the number collected.
815
static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
818
unsigned int maxaddrs)
820
unsigned int index = 0;
821
unsigned int naddr = 0;
822
const struct netdev_hw_addr *ha;
824
for_each_dev_addr(dev, ha)
825
if (index++ >= offset) {
826
addr[naddr++] = ha->addr;
827
if (naddr >= maxaddrs)
834
 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
 * at a specified offset within the list, into an array of address pointers and
 * return the number collected.
838
static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
841
unsigned int maxaddrs)
843
unsigned int index = 0;
844
unsigned int naddr = 0;
845
const struct netdev_hw_addr *ha;
847
netdev_for_each_mc_addr(ha, dev)
848
if (index++ >= offset) {
849
addr[naddr++] = ha->addr;
850
if (naddr >= maxaddrs)
857
* Configure the exact and hash address filters to handle a port's multicast
858
* and secondary unicast MAC addresses.
860
static int set_addr_filters(const struct net_device *dev, bool sleep)
865
unsigned int offset, naddr;
868
const struct port_info *pi = netdev_priv(dev);
870
/* first do the secondary unicast addresses */
871
for (offset = 0; ; offset += naddr) {
872
naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
877
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
878
naddr, addr, NULL, &uhash, sleep);
885
/* next set up the multicast addresses */
886
for (offset = 0; ; offset += naddr) {
887
naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
892
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
893
naddr, addr, NULL, &mhash, sleep);
899
return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
900
uhash | mhash, sleep);
904
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
907
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
910
struct port_info *pi = netdev_priv(dev);
912
ret = set_addr_filters(dev, sleep_ok);
914
ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
915
(dev->flags & IFF_PROMISC) != 0,
916
(dev->flags & IFF_ALLMULTI) != 0,
922
* Set the current receive modes on the device.
924
static void cxgb4vf_set_rxmode(struct net_device *dev)
926
/* unfortunately we can't return errors to the stack */
927
set_rxmode(dev, -1, false);
931
* Find the entry in the interrupt holdoff timer value array which comes
932
* closest to the specified interrupt holdoff value.
934
static int closest_timer(const struct sge *s, int us)
936
int i, timer_idx = 0, min_delta = INT_MAX;
938
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
939
int delta = us - s->timer_val[i];
942
if (delta < min_delta) {
950
static int closest_thres(const struct sge *s, int thres)
952
int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
954
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
955
delta = thres - s->counter_val[i];
958
if (delta < min_delta) {
967
* Return a queue's interrupt hold-off time in us. 0 means no timer.
969
static unsigned int qtimer_val(const struct adapter *adapter,
970
const struct sge_rspq *rspq)
972
unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
974
return timer_idx < SGE_NTIMERS
975
? adapter->sge.timer_val[timer_idx]
980
* set_rxq_intr_params - set a queue's interrupt holdoff parameters
981
* @adapter: the adapter
982
* @rspq: the RX response queue
983
* @us: the hold-off time in us, or 0 to disable timer
984
* @cnt: the hold-off packet count, or 0 to disable counter
986
* Sets an RX response queue's interrupt hold-off time and packet count.
987
* At least one of the two needs to be enabled for the queue to generate
990
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
991
unsigned int us, unsigned int cnt)
993
unsigned int timer_idx;
996
* If both the interrupt holdoff timer and count are specified as
997
* zero, default to a holdoff count of 1 ...
1003
* If an interrupt holdoff count has been specified, then find the
1004
* closest configured holdoff count and use that. If the response
1005
* queue has already been created, then update its queue context
1012
pktcnt_idx = closest_thres(&adapter->sge, cnt);
1013
if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1014
v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1016
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1017
FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
1018
err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1022
rspq->pktcnt_idx = pktcnt_idx;
1026
* Compute the closest holdoff timer index from the supplied holdoff
1029
timer_idx = (us == 0
1030
? SGE_TIMER_RSTRT_CNTR
1031
: closest_timer(&adapter->sge, us));
1034
* Update the response queue's interrupt coalescing parameters and
1037
rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
1038
(cnt > 0 ? QINTR_CNT_EN : 0));
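	/*
	 * Worked example (hypothetical request): us = 5, cnt = 8 keeps the
	 * packet counter enabled, picks the configured threshold closest to 8
	 * via closest_thres() (pushing it to the firmware with
	 * t4vf_set_params() if the queue already exists), and then selects
	 * the configured holdoff timer closest to 5us via closest_timer().
	 */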
1043
* Return a version number to identify the type of adapter. The scheme is:
1044
* - bits 0..9: chip version
1045
* - bits 10..15: chip revision
1047
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1050
* Chip version 4, revision 0x3f (cxgb4vf).
1052
return 4 | (0x3f << 10);
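	/*
	 * Decoding sketch (illustrative only, never compiled): shows how a
	 * consumer of this value would pull the two fields back out, matching
	 * the bit layout documented above.
	 */
#if 0
	unsigned int vers = 4 | (0x3f << 10);		/* value returned above */
	unsigned int chip_version  = vers & 0x3ff;	/* == 4 */
	unsigned int chip_revision = (vers >> 10) & 0x3f;	/* == 0x3f */
#endif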
1056
* Execute the specified ioctl command.
1058
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1064
* The VF Driver doesn't have access to any of the other
1065
* common Ethernet device ioctl()'s (like reading/writing
1066
* PHY registers, etc.
1077
* Change the device's MTU.
1079
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1082
struct port_info *pi = netdev_priv(dev);
1084
/* accommodate SACK */
1088
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1089
-1, -1, -1, -1, true);
1095
static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
1098
* Since there is no support for separate rx/tx vlan accel
1099
	 * enable/disable, make sure the tx flag is always in the same state as rx.
1101
if (features & NETIF_F_HW_VLAN_RX)
1102
features |= NETIF_F_HW_VLAN_TX;
1104
features &= ~NETIF_F_HW_VLAN_TX;
1109
static int cxgb4vf_set_features(struct net_device *dev, u32 features)
1111
struct port_info *pi = netdev_priv(dev);
1112
u32 changed = dev->features ^ features;
1114
if (changed & NETIF_F_HW_VLAN_RX)
1115
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1116
features & NETIF_F_HW_VLAN_TX, 0);
1122
 * Change the device's MAC address.
1124
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1127
struct sockaddr *addr = _addr;
1128
struct port_info *pi = netdev_priv(dev);
1130
if (!is_valid_ether_addr(addr->sa_data))
1133
ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1134
addr->sa_data, true);
1138
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1139
pi->xact_addr_filt = ret;
1143
#ifdef CONFIG_NET_POLL_CONTROLLER
1145
* Poll all of our receive queues. This is called outside of normal interrupt
1148
static void cxgb4vf_poll_controller(struct net_device *dev)
1150
struct port_info *pi = netdev_priv(dev);
1151
struct adapter *adapter = pi->adapter;
1153
if (adapter->flags & USING_MSIX) {
1154
struct sge_eth_rxq *rxq;
1157
rxq = &adapter->sge.ethrxq[pi->first_qset];
1158
for (nqsets = pi->nqsets; nqsets; nqsets--) {
1159
t4vf_sge_intr_msix(0, &rxq->rspq);
1163
t4vf_intr_handler(adapter)(0, adapter);
1168
* Ethtool operations.
1169
* ===================
1171
* Note that we don't support any ethtool operations which change the physical
1172
* state of the port to which we're linked.
1176
* Return current port link settings.
1178
static int cxgb4vf_get_settings(struct net_device *dev,
1179
struct ethtool_cmd *cmd)
1181
const struct port_info *pi = netdev_priv(dev);
1183
cmd->supported = pi->link_cfg.supported;
1184
cmd->advertising = pi->link_cfg.advertising;
1185
ethtool_cmd_speed_set(cmd,
1186
netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
1187
cmd->duplex = DUPLEX_FULL;
1189
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1190
cmd->phy_address = pi->port_id;
1191
cmd->transceiver = XCVR_EXTERNAL;
1192
cmd->autoneg = pi->link_cfg.autoneg;
1199
* Return our driver information.
1201
static void cxgb4vf_get_drvinfo(struct net_device *dev,
1202
struct ethtool_drvinfo *drvinfo)
1204
struct adapter *adapter = netdev2adap(dev);
1206
strcpy(drvinfo->driver, KBUILD_MODNAME);
1207
strcpy(drvinfo->version, DRV_VERSION);
1208
strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
1209
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1210
"%u.%u.%u.%u, TP %u.%u.%u.%u",
1211
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
1212
FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
1213
FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
1214
FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
1215
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
1216
FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
1217
FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
1218
FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
1222
* Return current adapter message level.
1224
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1226
return netdev2adap(dev)->msg_enable;
1230
* Set current adapter message level.
1232
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1234
netdev2adap(dev)->msg_enable = msglevel;
1238
* Return the device's current Queue Set ring size parameters along with the
1239
* allowed maximum values. Since ethtool doesn't understand the concept of
1240
* multi-queue devices, we just return the current values associated with the
1243
static void cxgb4vf_get_ringparam(struct net_device *dev,
1244
struct ethtool_ringparam *rp)
1246
const struct port_info *pi = netdev_priv(dev);
1247
const struct sge *s = &pi->adapter->sge;
1249
rp->rx_max_pending = MAX_RX_BUFFERS;
1250
rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1251
rp->rx_jumbo_max_pending = 0;
1252
rp->tx_max_pending = MAX_TXQ_ENTRIES;
1254
rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1255
rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1256
rp->rx_jumbo_pending = 0;
1257
rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1261
* Set the Queue Set ring size parameters for the device. Again, since
1262
* ethtool doesn't allow for the concept of multiple queues per device, we'll
1263
* apply these new values across all of the Queue Sets associated with the
1264
* device -- after vetting them of course!
1266
static int cxgb4vf_set_ringparam(struct net_device *dev,
1267
struct ethtool_ringparam *rp)
1269
const struct port_info *pi = netdev_priv(dev);
1270
struct adapter *adapter = pi->adapter;
1271
struct sge *s = &adapter->sge;
1274
if (rp->rx_pending > MAX_RX_BUFFERS ||
1275
rp->rx_jumbo_pending ||
1276
rp->tx_pending > MAX_TXQ_ENTRIES ||
1277
rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1278
rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1279
rp->rx_pending < MIN_FL_ENTRIES ||
1280
rp->tx_pending < MIN_TXQ_ENTRIES)
1283
if (adapter->flags & FULL_INIT_DONE)
1286
for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1287
s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1288
s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1289
s->ethtxq[qs].q.size = rp->tx_pending;
1295
* Return the interrupt holdoff timer and count for the first Queue Set on the
1296
* device. Our extension ioctl() (the cxgbtool interface) allows the
1297
* interrupt holdoff timer to be read on all of the device's Queue Sets.
1299
static int cxgb4vf_get_coalesce(struct net_device *dev,
1300
struct ethtool_coalesce *coalesce)
1302
const struct port_info *pi = netdev_priv(dev);
1303
const struct adapter *adapter = pi->adapter;
1304
const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1306
coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1307
coalesce->rx_max_coalesced_frames =
1308
((rspq->intr_params & QINTR_CNT_EN)
1309
? adapter->sge.counter_val[rspq->pktcnt_idx]
1315
* Set the RX interrupt holdoff timer and count for the first Queue Set on the
1316
* interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1317
* the interrupt holdoff timer on any of the device's Queue Sets.
1319
static int cxgb4vf_set_coalesce(struct net_device *dev,
1320
struct ethtool_coalesce *coalesce)
1322
const struct port_info *pi = netdev_priv(dev);
1323
struct adapter *adapter = pi->adapter;
1325
return set_rxq_intr_params(adapter,
1326
&adapter->sge.ethrxq[pi->first_qset].rspq,
1327
coalesce->rx_coalesce_usecs,
1328
coalesce->rx_max_coalesced_frames);
1332
* Report current port link pause parameter settings.
1334
static void cxgb4vf_get_pauseparam(struct net_device *dev,
1335
struct ethtool_pauseparam *pauseparam)
1337
struct port_info *pi = netdev_priv(dev);
1339
pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1340
pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1341
pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1345
* Identify the port by blinking the port's LED.
1347
static int cxgb4vf_phys_id(struct net_device *dev,
1348
enum ethtool_phys_id_state state)
1351
struct port_info *pi = netdev_priv(dev);
1353
if (state == ETHTOOL_ID_ACTIVE)
1355
else if (state == ETHTOOL_ID_INACTIVE)
1360
return t4vf_identify_port(pi->adapter, pi->viid, val);
1364
* Port stats maintained per queue of the port.
1366
struct queue_port_stats {
1377
* Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1378
* these need to match the order of statistics returned by
1379
* t4vf_get_port_stats().
1381
static const char stats_strings[][ETH_GSTRING_LEN] = {
1383
* These must match the layout of the t4vf_port_stats structure.
1385
"TxBroadcastBytes ",
1386
"TxBroadcastFrames ",
1387
"TxMulticastBytes ",
1388
"TxMulticastFrames ",
1394
"RxBroadcastBytes ",
1395
"RxBroadcastFrames ",
1396
"RxMulticastBytes ",
1397
"RxMulticastFrames ",
1403
* These are accumulated per-queue statistics and must match the
1404
* order of the fields in the queue_port_stats structure.
1416
* Return the number of statistics in the specified statistics set.
1418
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1422
return ARRAY_SIZE(stats_strings);
1430
* Return the strings for the specified statistics set.
1432
static void cxgb4vf_get_strings(struct net_device *dev,
1438
memcpy(data, stats_strings, sizeof(stats_strings));
1444
* Small utility routine to accumulate queue statistics across the queues of
1447
static void collect_sge_port_stats(const struct adapter *adapter,
1448
const struct port_info *pi,
1449
struct queue_port_stats *stats)
1451
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1452
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1455
memset(stats, 0, sizeof(*stats));
1456
for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1457
stats->tso += txq->tso;
1458
stats->tx_csum += txq->tx_cso;
1459
stats->rx_csum += rxq->stats.rx_cso;
1460
stats->vlan_ex += rxq->stats.vlan_ex;
1461
stats->vlan_ins += txq->vlan_ins;
1462
stats->lro_pkts += rxq->stats.lro_pkts;
1463
stats->lro_merged += rxq->stats.lro_merged;
1468
* Return the ETH_SS_STATS statistics set.
1470
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1471
struct ethtool_stats *stats,
1474
struct port_info *pi = netdev2pinfo(dev);
1475
struct adapter *adapter = pi->adapter;
1476
int err = t4vf_get_port_stats(adapter, pi->pidx,
1477
(struct t4vf_port_stats *)data);
1479
memset(data, 0, sizeof(struct t4vf_port_stats));
1481
data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1482
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1486
* Return the size of our register map.
1488
static int cxgb4vf_get_regs_len(struct net_device *dev)
1490
return T4VF_REGMAP_SIZE;
1494
* Dump a block of registers, start to end inclusive, into a buffer.
1496
static void reg_block_dump(struct adapter *adapter, void *regbuf,
1497
unsigned int start, unsigned int end)
1499
u32 *bp = regbuf + start - T4VF_REGMAP_START;
1501
for ( ; start <= end; start += sizeof(u32)) {
1503
* Avoid reading the Mailbox Control register since that
1504
* can trigger a Mailbox Ownership Arbitration cycle and
1505
* interfere with communication with the firmware.
1507
if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1510
*bp++ = t4_read_reg(adapter, start);
1515
* Copy our entire register map into the provided buffer.
1517
static void cxgb4vf_get_regs(struct net_device *dev,
1518
struct ethtool_regs *regs,
1521
struct adapter *adapter = netdev2adap(dev);
1523
regs->version = mk_adap_vers(adapter);
1526
* Fill in register buffer with our register map.
1528
memset(regbuf, 0, T4VF_REGMAP_SIZE);
1530
reg_block_dump(adapter, regbuf,
1531
T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1532
T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1533
reg_block_dump(adapter, regbuf,
1534
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1535
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1536
reg_block_dump(adapter, regbuf,
1537
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1538
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
1539
reg_block_dump(adapter, regbuf,
1540
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1541
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1543
reg_block_dump(adapter, regbuf,
1544
T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1545
T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1549
* Report current Wake On LAN settings.
1551
static void cxgb4vf_get_wol(struct net_device *dev,
1552
struct ethtool_wolinfo *wol)
1556
memset(&wol->sopass, 0, sizeof(wol->sopass));
1560
* TCP Segmentation Offload flags which we support.
1562
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
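/*
 * Sketch (hedged: the exact feature combination is an assumption, not taken
 * from this file) of how TSO_FLAGS is typically folded into a net device's
 * feature mask when the interface is set up at probe time.
 */
#if 0
	netdev->features |= NETIF_F_SG | TSO_FLAGS |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
#endif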
1564
static struct ethtool_ops cxgb4vf_ethtool_ops = {
1565
.get_settings = cxgb4vf_get_settings,
1566
.get_drvinfo = cxgb4vf_get_drvinfo,
1567
.get_msglevel = cxgb4vf_get_msglevel,
1568
.set_msglevel = cxgb4vf_set_msglevel,
1569
.get_ringparam = cxgb4vf_get_ringparam,
1570
.set_ringparam = cxgb4vf_set_ringparam,
1571
.get_coalesce = cxgb4vf_get_coalesce,
1572
.set_coalesce = cxgb4vf_set_coalesce,
1573
.get_pauseparam = cxgb4vf_get_pauseparam,
1574
.get_link = ethtool_op_get_link,
1575
.get_strings = cxgb4vf_get_strings,
1576
.set_phys_id = cxgb4vf_phys_id,
1577
.get_sset_count = cxgb4vf_get_sset_count,
1578
.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1579
.get_regs_len = cxgb4vf_get_regs_len,
1580
.get_regs = cxgb4vf_get_regs,
1581
.get_wol = cxgb4vf_get_wol,
1585
* /sys/kernel/debug/cxgb4vf support code and data.
1586
* ================================================
1590
 * Show SGE Queue Set information. We display QPL Queue Sets per line.
1594
static int sge_qinfo_show(struct seq_file *seq, void *v)
1596
struct adapter *adapter = seq->private;
1597
int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1598
int qs, r = (uintptr_t)v - 1;
1601
seq_putc(seq, '\n');
1603
#define S3(fmt_spec, s, v) \
1605
seq_printf(seq, "%-12s", s); \
1606
for (qs = 0; qs < n; ++qs) \
1607
seq_printf(seq, " %16" fmt_spec, v); \
1608
seq_putc(seq, '\n'); \
1610
#define S(s, v) S3("s", s, v)
1611
#define T(s, v) S3("u", s, txq[qs].v)
1612
#define R(s, v) S3("u", s, rxq[qs].v)
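	/*
	 * Expansion example (illustrative): T("TxQ ID:", q.abs_id) becomes
	 *
	 *	seq_printf(seq, "%-12s", "TxQ ID:");
	 *	for (qs = 0; qs < n; ++qs)
	 *		seq_printf(seq, " %16u", txq[qs].q.abs_id);
	 *	seq_putc(seq, '\n');
	 *
	 * i.e. one labelled row with one column per Queue Set on this line.
	 */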
1614
if (r < eth_entries) {
1615
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1616
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1617
int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1619
S("QType:", "Ethernet");
1621
(rxq[qs].rspq.netdev
1622
? rxq[qs].rspq.netdev->name
1625
(rxq[qs].rspq.netdev
1626
? ((struct port_info *)
1627
netdev_priv(rxq[qs].rspq.netdev))->port_id
1629
T("TxQ ID:", q.abs_id);
1630
T("TxQ size:", q.size);
1631
T("TxQ inuse:", q.in_use);
1632
T("TxQ PIdx:", q.pidx);
1633
T("TxQ CIdx:", q.cidx);
1634
R("RspQ ID:", rspq.abs_id);
1635
R("RspQ size:", rspq.size);
1636
R("RspQE size:", rspq.iqe_len);
1637
S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1638
S3("u", "Intr pktcnt:",
1639
adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1640
R("RspQ CIdx:", rspq.cidx);
1641
R("RspQ Gen:", rspq.gen);
1642
R("FL ID:", fl.abs_id);
1643
R("FL size:", fl.size - MIN_FL_RESID);
1644
R("FL avail:", fl.avail);
1645
R("FL PIdx:", fl.pidx);
1646
R("FL CIdx:", fl.cidx);
1652
const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1654
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1655
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1656
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1657
qtimer_val(adapter, evtq));
1658
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1659
adapter->sge.counter_val[evtq->pktcnt_idx]);
1660
seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
1661
seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1662
} else if (r == 1) {
1663
const struct sge_rspq *intrq = &adapter->sge.intrq;
1665
seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1666
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1667
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1668
qtimer_val(adapter, intrq));
1669
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1670
adapter->sge.counter_val[intrq->pktcnt_idx]);
1671
seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
1672
seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1684
* Return the number of "entries" in our "file". We group the multi-Queue
1685
* sections with QPL Queue Sets per "entry". The sections of the output are:
1687
* Ethernet RX/TX Queue Sets
1688
* Firmware Event Queue
1689
* Forwarded Interrupt Queue (if in MSI mode)
1691
static int sge_queue_entries(const struct adapter *adapter)
1693
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1694
((adapter->flags & USING_MSI) != 0);
1697
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1699
int entries = sge_queue_entries(seq->private);
1701
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1704
static void sge_queue_stop(struct seq_file *seq, void *v)
1708
static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1710
int entries = sge_queue_entries(seq->private);
1713
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1716
static const struct seq_operations sge_qinfo_seq_ops = {
1717
.start = sge_queue_start,
1718
.next = sge_queue_next,
1719
.stop = sge_queue_stop,
1720
.show = sge_qinfo_show
1723
static int sge_qinfo_open(struct inode *inode, struct file *file)
1725
int res = seq_open(file, &sge_qinfo_seq_ops);
1728
struct seq_file *seq = file->private_data;
1729
seq->private = inode->i_private;
1734
static const struct file_operations sge_qinfo_debugfs_fops = {
1735
.owner = THIS_MODULE,
1736
.open = sge_qinfo_open,
1738
.llseek = seq_lseek,
1739
.release = seq_release,
1743
 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
1747
static int sge_qstats_show(struct seq_file *seq, void *v)
1749
struct adapter *adapter = seq->private;
1750
int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1751
int qs, r = (uintptr_t)v - 1;
1754
seq_putc(seq, '\n');
1756
#define S3(fmt, s, v) \
1758
seq_printf(seq, "%-16s", s); \
1759
for (qs = 0; qs < n; ++qs) \
1760
seq_printf(seq, " %8" fmt, v); \
1761
seq_putc(seq, '\n'); \
1763
#define S(s, v) S3("s", s, v)
1765
#define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1766
#define T(s, v) T3("lu", s, v)
1768
#define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1769
#define R(s, v) R3("lu", s, v)
1771
if (r < eth_entries) {
1772
const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1773
const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1774
int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1776
S("QType:", "Ethernet");
1778
(rxq[qs].rspq.netdev
1779
? rxq[qs].rspq.netdev->name
1781
R3("u", "RspQNullInts:", rspq.unhandled_irqs);
1782
R("RxPackets:", stats.pkts);
1783
R("RxCSO:", stats.rx_cso);
1784
R("VLANxtract:", stats.vlan_ex);
1785
R("LROmerged:", stats.lro_merged);
1786
R("LROpackets:", stats.lro_pkts);
1787
R("RxDrops:", stats.rx_drops);
1789
T("TxCSO:", tx_cso);
1790
T("VLANins:", vlan_ins);
1791
T("TxQFull:", q.stops);
1792
T("TxQRestarts:", q.restarts);
1793
T("TxMapErr:", mapping_err);
1794
R("FLAllocErr:", fl.alloc_failed);
1795
R("FLLrgAlcErr:", fl.large_alloc_failed);
1796
R("FLStarving:", fl.starving);
1802
const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1804
seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
1805
seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1806
evtq->unhandled_irqs);
1807
seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
1808
seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
1809
} else if (r == 1) {
1810
const struct sge_rspq *intrq = &adapter->sge.intrq;
1812
seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
1813
seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1814
intrq->unhandled_irqs);
1815
seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
1816
seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
1830
* Return the number of "entries" in our "file". We group the multi-Queue
1831
* sections with QPL Queue Sets per "entry". The sections of the output are:
1833
* Ethernet RX/TX Queue Sets
1834
* Firmware Event Queue
1835
* Forwarded Interrupt Queue (if in MSI mode)
1837
static int sge_qstats_entries(const struct adapter *adapter)
1839
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1840
((adapter->flags & USING_MSI) != 0);
1843
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
1845
int entries = sge_qstats_entries(seq->private);
1847
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1850
static void sge_qstats_stop(struct seq_file *seq, void *v)
1854
static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
1856
int entries = sge_qstats_entries(seq->private);
1859
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1862
static const struct seq_operations sge_qstats_seq_ops = {
1863
.start = sge_qstats_start,
1864
.next = sge_qstats_next,
1865
.stop = sge_qstats_stop,
1866
.show = sge_qstats_show
1869
static int sge_qstats_open(struct inode *inode, struct file *file)
1871
int res = seq_open(file, &sge_qstats_seq_ops);
1874
struct seq_file *seq = file->private_data;
1875
seq->private = inode->i_private;
1880
static const struct file_operations sge_qstats_proc_fops = {
1881
.owner = THIS_MODULE,
1882
.open = sge_qstats_open,
1884
.llseek = seq_lseek,
1885
.release = seq_release,
1889
* Show PCI-E SR-IOV Virtual Function Resource Limits.
1891
static int resources_show(struct seq_file *seq, void *v)
1893
struct adapter *adapter = seq->private;
1894
struct vf_resources *vfres = &adapter->params.vfres;
1896
#define S(desc, fmt, var) \
1897
seq_printf(seq, "%-60s " fmt "\n", \
1898
desc " (" #var "):", vfres->var)
1900
S("Virtual Interfaces", "%d", nvi);
1901
S("Egress Queues", "%d", neq);
1902
S("Ethernet Control", "%d", nethctrl);
1903
S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
1904
S("Ingress Queues", "%d", niq);
1905
S("Traffic Class", "%d", tc);
1906
S("Port Access Rights Mask", "%#x", pmask);
1907
S("MAC Address Filters", "%d", nexactf);
1908
S("Firmware Command Read Capabilities", "%#x", r_caps);
1909
S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
1916
static int resources_open(struct inode *inode, struct file *file)
1918
return single_open(file, resources_show, inode->i_private);
1921
static const struct file_operations resources_proc_fops = {
1922
.owner = THIS_MODULE,
1923
.open = resources_open,
1925
.llseek = seq_lseek,
1926
.release = single_release,
1930
* Show Virtual Interfaces.
1932
static int interfaces_show(struct seq_file *seq, void *v)
1934
if (v == SEQ_START_TOKEN) {
1935
seq_puts(seq, "Interface Port VIID\n");
1937
struct adapter *adapter = seq->private;
1938
int pidx = (uintptr_t)v - 2;
1939
struct net_device *dev = adapter->port[pidx];
1940
struct port_info *pi = netdev_priv(dev);
1942
seq_printf(seq, "%9s %4d %#5x\n",
1943
dev->name, pi->port_id, pi->viid);
1948
static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
1950
return pos <= adapter->params.nports
1951
? (void *)(uintptr_t)(pos + 1)
1955
static void *interfaces_start(struct seq_file *seq, loff_t *pos)
1958
? interfaces_get_idx(seq->private, *pos)
1962
static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
1965
return interfaces_get_idx(seq->private, *pos);
1968
static void interfaces_stop(struct seq_file *seq, void *v)
1972
static const struct seq_operations interfaces_seq_ops = {
1973
.start = interfaces_start,
1974
.next = interfaces_next,
1975
.stop = interfaces_stop,
1976
.show = interfaces_show
1979
static int interfaces_open(struct inode *inode, struct file *file)
1981
int res = seq_open(file, &interfaces_seq_ops);
1984
struct seq_file *seq = file->private_data;
1985
seq->private = inode->i_private;
1990
static const struct file_operations interfaces_proc_fops = {
1991
.owner = THIS_MODULE,
1992
.open = interfaces_open,
1994
.llseek = seq_lseek,
1995
.release = seq_release,
1999
 * /sys/kernel/debug/cxgb4vf/ files list.
2001
struct cxgb4vf_debugfs_entry {
2002
const char *name; /* name of debugfs node */
2003
mode_t mode; /* file system mode */
2004
const struct file_operations *fops;
2007
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2008
{ "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2009
{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2010
{ "resources", S_IRUGO, &resources_proc_fops },
2011
{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2015
* Module and device initialization and cleanup code.
2016
* ==================================================
2020
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2021
* directory (debugfs_root) has already been set up.
2023
static int __devinit setup_debugfs(struct adapter *adapter)
2027
BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2030
* Debugfs support is best effort.
2032
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2033
(void)debugfs_create_file(debugfs_files[i].name,
2034
debugfs_files[i].mode,
2035
adapter->debugfs_root,
2037
debugfs_files[i].fops);
2043
* Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2044
* it to our caller to tear down the directory (debugfs_root).
2046
static void cleanup_debugfs(struct adapter *adapter)
2048
BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2051
* Unlike our sister routine cleanup_proc(), we don't need to remove
2052
* individual entries because a call will be made to
2053
* debugfs_remove_recursive(). We just need to clean up any ancillary
2060
* Perform early "adapter" initialization. This is where we discover what
2061
* adapter parameters we're going to be using and initialize basic adapter
2064
static int __devinit adap_init0(struct adapter *adapter)
2066
struct vf_resources *vfres = &adapter->params.vfres;
2067
struct sge_params *sge_params = &adapter->params.sge;
2068
struct sge *s = &adapter->sge;
2069
unsigned int ethqsets;
2073
* Wait for the device to become ready before proceeding ...
2075
err = t4vf_wait_dev_ready(adapter);
2077
dev_err(adapter->pdev_dev, "device didn't become ready:"
2083
* Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2084
* 2.6.31 and later we can't call pci_reset_function() in order to
2085
	 * issue an FLR because of a self-deadlock on the device semaphore.
2086
* Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2087
* cases where they're needed -- for instance, some versions of KVM
2088
* fail to reset "Assigned Devices" when the VM reboots. Therefore we
2089
* use the firmware based reset in order to reset any per function
2092
err = t4vf_fw_reset(adapter);
2094
dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2099
* Grab basic operational parameters. These will predominantly have
2100
* been set up by the Physical Function Driver or will be hard coded
2101
* into the adapter. We just have to live with them ... Note that
2102
* we _must_ get our VPD parameters before our SGE parameters because
2103
* we need to know the adapter's core clock from the VPD in order to
2104
* properly decode the SGE Timer Values.
2106
err = t4vf_get_dev_params(adapter);
2108
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2109
" device parameters: err=%d\n", err);
2112
err = t4vf_get_vpd_params(adapter);
2114
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2115
" VPD parameters: err=%d\n", err);
2118
err = t4vf_get_sge_params(adapter);
2120
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2121
" SGE parameters: err=%d\n", err);
2124
err = t4vf_get_rss_glb_config(adapter);
2126
dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2127
" RSS parameters: err=%d\n", err);
2130
if (adapter->params.rss.mode !=
2131
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2132
dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2133
" mode %d\n", adapter->params.rss.mode);
2136
err = t4vf_sge_init(adapter);
2138
dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2144
* Retrieve our RX interrupt holdoff timer values and counter
2145
* threshold values from the SGE parameters.
2147
s->timer_val[0] = core_ticks_to_us(adapter,
2148
TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
2149
s->timer_val[1] = core_ticks_to_us(adapter,
2150
TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
2151
s->timer_val[2] = core_ticks_to_us(adapter,
2152
TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
2153
s->timer_val[3] = core_ticks_to_us(adapter,
2154
TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
2155
s->timer_val[4] = core_ticks_to_us(adapter,
2156
TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
2157
s->timer_val[5] = core_ticks_to_us(adapter,
2158
TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
2161
THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
2163
THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
2165
THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2167
THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2170
* Grab our Virtual Interface resource allocation, extract the
2171
* features that we're interested in and do a bit of sanity testing on
2174
err = t4vf_get_vfres(adapter);
2176
dev_err(adapter->pdev_dev, "unable to get virtual interface"
2177
" resources: err=%d\n", err);
2182
* The number of "ports" which we support is equal to the number of
2183
* Virtual Interfaces with which we've been provisioned.
2185
adapter->params.nports = vfres->nvi;
2186
if (adapter->params.nports > MAX_NPORTS) {
2187
dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
2188
" virtual interfaces\n", MAX_NPORTS,
2189
adapter->params.nports);
2190
adapter->params.nports = MAX_NPORTS;
2194
* We need to reserve a number of the ingress queues with Free List
2195
* and Interrupt capabilities for special interrupt purposes (like
2196
* asynchronous firmware messages, or forwarded interrupts if we're
2197
* using MSI). The rest of the FL/Intr-capable ingress queues will be
2198
* matched up one-for-one with Ethernet/Control egress queues in order
2199
	 * to form "Queue Sets" which will be apportioned between the "ports".
2200
* For each Queue Set, we'll need the ability to allocate two Egress
2201
* Contexts -- one for the Ingress Queue Free List and one for the TX
2204
ethqsets = vfres->niqflint - INGQ_EXTRAS;
2205
if (vfres->nethctrl != ethqsets) {
2206
dev_warn(adapter->pdev_dev, "unequal number of [available]"
2207
" ingress/egress queues (%d/%d); using minimum for"
2208
" number of Queue Sets\n", ethqsets, vfres->nethctrl);
2209
ethqsets = min(vfres->nethctrl, ethqsets);
2211
if (vfres->neq < ethqsets*2) {
2212
dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
2213
" to support Queue Sets (%d); reducing allowed Queue"
2214
" Sets\n", vfres->neq, ethqsets);
2215
ethqsets = vfres->neq/2;
2217
if (ethqsets > MAX_ETH_QSETS) {
2218
dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
2219
" Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
2220
ethqsets = MAX_ETH_QSETS;
2222
if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
2223
dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
2224
" ignored\n", vfres->niq, vfres->neq - ethqsets*2);
2226
adapter->sge.max_ethqsets = ethqsets;
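	/*
	 * Worked example with made-up provisioning numbers: niqflint = 6,
	 * nethctrl = 4, neq = 8 and INGQ_EXTRAS = 2 (the firmware event
	 * queue plus the forwarded-interrupt queue).  That leaves 6 - 2 = 4
	 * candidate Queue Sets; min(nethctrl, 4) keeps it at 4, and with
	 * neq / 2 = 4 pairs of Egress Contexts available as well,
	 * max_ethqsets ends up as 4.
	 */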
2229
* Check for various parameter sanity issues. Most checks simply
2230
	 * result in us using fewer resources than our provisioning but we
2231
* do need at least one "port" with which to work ...
2233
if (adapter->sge.max_ethqsets < adapter->params.nports) {
2234
dev_warn(adapter->pdev_dev, "only using %d of %d available"
2235
" virtual interfaces (too few Queue Sets)\n",
2236
adapter->sge.max_ethqsets, adapter->params.nports);
2237
adapter->params.nports = adapter->sge.max_ethqsets;
2239
if (adapter->params.nports == 0) {
2240
dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2247
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2248
u8 pkt_cnt_idx, unsigned int size,
2249
unsigned int iqe_size)
2251
rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
2252
(pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
2253
rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2256
rspq->iqe_len = iqe_size;
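	/*
	 * Example (illustrative): init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size)
	 * as used in cfg_queues() below selects holdoff timer index 0,
	 * enables the packet count threshold (index 0 is below SGE_NCOUNTERS,
	 * so QINTR_CNT_EN is set), and asks for a 1024-entry queue of
	 * iqe_size-byte entries.
	 */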
2261

/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 */
static void __devinit cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support.  In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}
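
	/*
	 * For example (illustrative numbers only): with 4 ports of which 2
	 * are 10G, max_ethqsets = 16 and at least 7 online CPUs, each 1G
	 * port gets one Queue Set and each 10G port gets
	 * q10g = (16 - 2) / 2 = 7 Queue Sets.
	 */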
* Allocate the "Queue Sets" to the various Virtual Interfaces.
2301
* The layout will be established in setup_sge_queues() when the
2302
* adapter is brough up for the first time.
2305
for_each_port(adapter, pidx) {
2306
struct port_info *pi = adap2pinfo(adapter, pidx);
2308
pi->first_qset = qidx;
2309
pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
2315
* The Ingress Queue Entry Size for our various Response Queues needs
2316
* to be big enough to accommodate the largest message we can receive
2317
* from the chip/firmware; which is 64 bytes ...
2322
* Set up default Queue Set parameters ... Start off with the
2323
* shortest interrupt holdoff timer.
2325
for (qs = 0; qs < s->max_ethqsets; qs++) {
2326
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2327
struct sge_eth_txq *txq = &s->ethtxq[qs];
2329
init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2335
* The firmware event queue is used for link state changes and
2336
* notifications of TX DMA completions.
2338
init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2341
* The forwarded interrupt queue is used when we're in MSI interrupt
2342
* mode. In this mode all interrupts associated with RX queues will
2343
* be forwarded to a single queue which we'll associate with our MSI
2344
* interrupt vector. The messages dropped in the forwarded interrupt
2345
* queue will indicate which ingress queue needs servicing ... This
2346
* queue needs to be large enough to accommodate all of the ingress
2347
* queues which are forwarding their interrupt (+1 to prevent the PIDX
2348
* from equalling the CIDX if every ingress queue has an outstanding
2349
* interrupt). The queue doesn't need to be any larger because no
2350
* ingress queue will ever have more than one outstanding interrupt at
2353
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2358

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ether Queue Sets, iterate across the
	 * "ports" and reduce their individual Queue Set allocations.
	 */
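	/*
	 * Note that each pass of the loop below removes at most one Queue
	 * Set from each "port" (and only from ports that still have more
	 * than one), so the reduction stays balanced across ports and no
	 * port ever drops below a single Queue Set.
	 */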
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need.  Minimally we need one for every Virtual Interface plus those needed
 * for our "extras".  Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 */
static int __devinit enable_msix(struct adapter *adapter)
{
	int i, err, want, need;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue).  We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras".  So now we get to see
	 * if the song is right ...
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;
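	/*
	 * With the old-style pci_enable_msix() interface used here, a
	 * positive return value means the request failed but that many
	 * vectors could still be allocated, so the loop below retries with
	 * that smaller count until we either succeed (a return of 0) or the
	 * available count falls below our minimum need.
	 */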
	while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
		want = err;

	if (err == 0) {
		int nqsets = want - MSIX_EXTRAS;
		if (nqsets < s->max_ethqsets) {
			dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
				 " for %d Queue Sets\n", nqsets);
			s->max_ethqsets = nqsets;
			if (nqsets < s->ethqsets)
				reduce_ethqs(adapter, nqsets);
		}
		for (i = 0; i < want; ++i)
			adapter->msix_info[i].vec = entries[i].vector;
	} else if (err > 0) {
		pci_disable_msix(adapter->pdev);
		dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
			 " not using MSI-X\n", err);
	}
	return err;
}

static const struct net_device_ops cxgb4vf_netdev_ops	= {
	.ndo_open		= cxgb4vf_open,
	.ndo_stop		= cxgb4vf_stop,
	.ndo_start_xmit		= t4vf_eth_xmit,
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
	.ndo_fix_features	= cxgb4vf_fix_features,
	.ndo_set_features	= cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb4vf_poll_controller,
#endif
};

/*
 * "Probe" a device: initialize a device and construct all kernel and driver
 * state needed to manage the device.  This routine is called "init_one" in
 * the PF Driver ...
 */
static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	static int version_printed;

	int pci_using_dac;
	int err, pidx;
	unsigned int pmask;
	struct adapter *adapter;
	struct port_info *pi;
	struct net_device *netdev;

	/*
	 * Print our driver banner the first time we're called to initialize a
	 * device ...
	 */
	if (version_printed == 0) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		version_printed = 1;
	}

	/*
	 * Initialize generic PCI device state.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}

	/*
	 * Reserve PCI resources for the device.  If we can't get them some
	 * other driver may have already claimed the device ...
	 */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable_device;
	}

	/*
	 * Set up our DMA mask: try for 64-bit address masking first and
	 * fall back to 32-bit if we can't get 64 bits ...
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err == 0) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
				" coherent allocations\n");
			goto err_release_regions;
		}
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err != 0) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_release_regions;
		}
		pci_using_dac = 0;
	}

	/*
	 * Enable bus mastering for the device ...
	 */
	pci_set_master(pdev);

	/*
	 * Allocate our adapter data structure and attach it to the device.
	 */
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto err_release_regions;
	}
	pci_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;

	/*
	 * Initialize SMP data synchronization resources.
	 */
	spin_lock_init(&adapter->stats_lock);

	/*
	 * Map our I/O registers in BAR0.
	 */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_adapter;
	}

	/*
	 * Initialize adapter level features.
	 */
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	err = adap_init0(adapter);
	if (err)
		goto err_unmap_bar;
* Allocate our "adapter ports" and stitch everything together.
2568
pmask = adapter->params.vfres.pmask;
2569
for_each_port(adapter, pidx) {
2573
* We simplistically allocate our virtual interfaces
2574
* sequentially across the port numbers to which we have
2575
* access rights. This should be configurable in some manner
2580
port_id = ffs(pmask) - 1;
2581
pmask &= ~(1 << port_id);
2582
viid = t4vf_alloc_vi(adapter, port_id);
2584
dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2585
" err=%d\n", port_id, viid);
2591
* Allocate our network device and stitch things together.
2593
netdev = alloc_etherdev_mq(sizeof(struct port_info),
2595
if (netdev == NULL) {
2596
dev_err(&pdev->dev, "cannot allocate netdev for"
2597
" port %d\n", port_id);
2598
t4vf_free_vi(adapter, viid);
2602

		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;
		pi->viid = viid;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HIGHDMA;
		netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
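		/*
		 * NETIF_F_HIGHDMA is advertised only when the 64-bit DMA
		 * mask was accepted during the DMA configuration earlier in
		 * this routine.
		 */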
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4vf_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}
	}

	/*
	 * The "card" is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole "card" but rather proceed
	 * only with the ports we manage to register successfully.  However we
	 * must register at least one net device.
	 */
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}

		set_bit(pidx, &adapter->registered_device_map);
	}
	if (adapter->registered_device_map == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto err_free_dev;
	}

	/*
	 * Set up our debugfs entries.
	 */
	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory, continuing\n");
		else
			setup_debugfs(adapter);
	}

	/*
	 * See what interrupts we'll be using.  If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else {
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
				" err=%d\n",
				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
			goto err_free_debugfs;
		}
		adapter->flags |= USING_MSI;
	}

	/*
	 * Now that we know how many "ports" we have and what their types are,
	 * and how many Queue Sets we can support, we can configure our queue
	 * resources.
	 */
	cfg_queues(adapter);

	/*
	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & USING_MSIX) ? "MSI-X" :
			 (adapter->flags & USING_MSI) ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code.  Unwind state that's been created
	 * so far and return the error.
	 */

err_free_debugfs:
	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

err_unmap_bar:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter);
	pci_set_drvdata(pdev, NULL);

err_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);

err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}
* "Remove" a device: tear down all kernel and driver state created in the
2768
* "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2769
* that this is called "remove_one" in the PF Driver.)
2771
static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2773
struct adapter *adapter = pci_get_drvdata(pdev);
2776
* Tear down driver state associated with device.
2782
* Stop all of our activity. Unregister network port,
2783
* disable interrupts, etc.
2785
for_each_port(adapter, pidx)
2786
if (test_bit(pidx, &adapter->registered_device_map))
2787
unregister_netdev(adapter->port[pidx]);
2788
t4vf_sge_stop(adapter);
2789
if (adapter->flags & USING_MSIX) {
2790
pci_disable_msix(adapter->pdev);
2791
adapter->flags &= ~USING_MSIX;
2792
} else if (adapter->flags & USING_MSI) {
2793
pci_disable_msi(adapter->pdev);
2794
adapter->flags &= ~USING_MSI;
2798
* Tear down our debugfs entries.
2800
if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2801
cleanup_debugfs(adapter);
2802
debugfs_remove_recursive(adapter->debugfs_root);
2806

		/*
		 * Free all of the various resources which we've acquired ...
		 */
		t4vf_free_sge_resources(adapter);
		for_each_port(adapter, pidx) {
			struct net_device *netdev = adapter->port[pidx];
			struct port_info *pi;

			if (netdev == NULL)
				continue;

			pi = netdev_priv(netdev);
			t4vf_free_vi(adapter, pi->viid);
			free_netdev(netdev);
		}
		iounmap(adapter->regs);
		kfree(adapter);
		pci_set_drvdata(pdev, NULL);
	}

	/*
	 * Disable the device and release its PCI resources.
	 */
	pci_disable_device(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
}

/*
 * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.
 */
static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
	struct adapter *adapter;
	int pidx;

	adapter = pci_get_drvdata(pdev);
	if (!adapter)
		return;
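
	/*
	 * Note that, unlike the "remove" path above, we only quiesce the
	 * hardware here: the ingress/egress queues are freed to stop further
	 * DMA and interrupts, but the net devices stay registered and the
	 * adapter structure itself is not freed.
	 */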

	/*
	 * Disable all Virtual Interfaces.  This will shut down the
	 * delivery of all ingress packets into the chip for these
	 * Virtual Interfaces.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *netdev;
		struct port_info *pi;

		if (!test_bit(pidx, &adapter->registered_device_map))
			continue;

		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		pi = netdev_priv(netdev);
		t4vf_enable_vi(adapter, pi->viid, false, false);
	}

	/*
	 * Free up all Queues, which will prevent further DMA and
	 * Interrupts, allowing various internal pathways to drain.
	 */
	t4vf_free_sge_resources(adapter);
}

/*
 * PCI Device registration data structures.
 */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static struct pci_device_id cxgb4vf_pci_tbl[] = {
	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
	CH_DEVICE(0x4800, 0),	/* T440-dbg */
	CH_DEVICE(0x4801, 0),	/* T420-cr */
	CH_DEVICE(0x4802, 0),	/* T422-cr */
	CH_DEVICE(0x4803, 0),	/* T440-cr */
	CH_DEVICE(0x4804, 0),	/* T420-bch */
	CH_DEVICE(0x4805, 0),	/* T440-bch */
	CH_DEVICE(0x4806, 0),	/* T460-ch */
	CH_DEVICE(0x4807, 0),	/* T420-so */
	CH_DEVICE(0x4808, 0),	/* T420-cx */
	CH_DEVICE(0x4809, 0),	/* T420-bt */
	CH_DEVICE(0x480a, 0),	/* T404-bt */
	{ 0, }
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

static struct pci_driver cxgb4vf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= __devexit_p(cxgb4vf_pci_remove),
	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
};

/*
 * Initialize global driver state.
 */
static int __init cxgb4vf_module_init(void)
{
	int ret;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		printk(KERN_WARNING KBUILD_MODNAME
		       ": bad module parameter msi=%d; must be %d"
		       " (MSI-X or MSI) or %d (MSI)\n",
		       msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}

	/* Debugfs support is optional, just warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
		       " debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4vf_driver);
	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}

/*
 * Tear down global driver state.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}

module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);