/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the mac address of the guest OS interface in
 *	a virtualization environment.
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *
 * max_config_vpath:
 *	This configures the maximum number of VPATHs configured for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 *
 ******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include <linux/module.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
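
/*
 * Note on the completion helper above (a reading of the code, not new
 * behavior): __netif_tx_trylock() is used rather than a blocking lock so a
 * completion pass that races with an in-progress vxge_xmit() on the same
 * queue simply skips this round; SKBs it could not reap stay on the fifo
 * and are collected on the next call. The "more" flag returned by
 * vxge_hw_vpath_poll_tx() re-runs the loop when more than NR_SKB_COMPLETED
 * descriptors have completed.
 */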

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
	VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d  1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
			  VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d  skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ext_info->vlan &&
	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
		__vlan_hwaccel_put_tag(skb, ext_info->vlan);
	napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
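
/*
 * Note on vxge_post() (a reading of the code above): descriptors are posted
 * in batches of VXGE_HW_RXSYNC_FREQ_CNT. The first descriptor of each batch
 * is held back in *first_dtr and only published with a write-barrier post
 * (vxge_hw_ring_rxd_post_post_wmb) once the rest of the batch has been
 * queued, so the hardware never sees a partially rebuilt batch. E.g., if
 * VXGE_HW_RXSYNC_FREQ_CNT were 4, the pattern for descriptors d0..d4 would
 * be: hold d0, post d1-d3, then on d4 wmb-post d0 and hold d4. The caller
 * wmb-posts the final held descriptor after its completion loop.
 */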

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d  skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d  Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d  skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if (ring->rx_hwts) {
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head + pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
			skb_hwts->syststamp.tv64 = 0;
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled.  Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb->rxhash = ext_info.rth_value;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s:%d  Exiting...",
				__func__, __LINE__);
	return VXGE_HW_OK;
}
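
/*
 * Note on the receive path above: frames larger than
 * VXGE_LL_RX_COPY_THRESHOLD keep the original SKB and re-arm the descriptor
 * with a freshly allocated buffer (a buffer "flip"), while small frames are
 * memcpy'd into a new SKB so the original DMA buffer can be recycled in
 * place without an unmap/remap cycle. Either way the descriptor is returned
 * to the ring before the SKB is handed to the stack, so an allocation
 * failure drops the frame rather than starving the ring.
 */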

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d fifo_hw = %p dtr = %p "
				"tcode = 0x%x", fifo->ndev->name, __func__,
				__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/*  for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d  Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}
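
/*
 * Note on vxge_get_vpath_no(): vpath_selector[] holds, for each possible
 * vpath count n, the largest power-of-two-minus-one mask not exceeding n
 * (e.g. n = 4 -> mask 3, n = 5..8 -> mask 7). Masking the sum of the TCP
 * source and destination ports with vpath_selector[n - 1] gives a cheap
 * per-flow hash in [0, mask]; the clamp afterwards handles the case where
 * the mask exceeds the actual queue count (e.g. n = 5, hash 6 -> queue 4).
 */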

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
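
/*
 * Note on vxge_learn_mac(): when every vpath's DA table is full, the first
 * vpath is put into "catch-basin" mode via the mrpcim rts_mgr_cbasin_cfg
 * register, so frames for addresses that could not be programmed into any
 * DA table are still steered somewhere deterministic. Such addresses are
 * tracked only in the driver's linked list, with state
 * VXGE_LL_MAC_ADDR_IN_LIST rather than VXGE_LL_MAC_ADDR_IN_DA_TABLE.
 */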

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr = NULL;
	void *dtr_priv;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD?  Stop tx queue to avoid dropping packets.  TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
		   "%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!skb_frag_size(frag))
			continue;

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
						    0, skb_frag_size(frag),
						    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					skb_frag_size(frag));
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/*  for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}

static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
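
/*
 * Note on vxge_set_multicast(): individual multicast filters are attempted
 * first; if the requested multicast addresses plus already-programmed
 * unicast entries would overflow vpaths[0].max_mac_addr_cnt, or any
 * individual add fails, the function falls back to the _set_all_mcast
 * path, which purges the per-vpath multicast entries, turns on all-multi
 * in hardware, and mirrors that in dev->flags with IFF_ALLMULTI.
 */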

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d  Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
*/
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
*/
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct __vxge_hw_device *hldev;
	int msix_id;

	hldev = pci_get_drvdata(vdev->pdev);

	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
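
/*
 * Note on the two helpers above: each vpath owns a block of
 * VXGE_HW_VPATH_MSIX_ACTIVE vectors, so its Tx vector index is
 * device_id * VXGE_HW_VPATH_MSIX_ACTIVE and its Rx vector is that value
 * plus one. The alarm vector is shared and lives at offset
 * VXGE_ALARM_MSIX_ID inside the block of the lowest-numbered
 * (first_vp_id) vpath, which is why it is masked/unmasked through a
 * different index computation than the traffic vectors.
 */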

/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}

/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}

/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (!vpath->is_open)
		return status;

	for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
		status = vxge_hw_vpath_vid_add(vpath->handle, vid);

	return status;
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
*/
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}

/* Configure CI */
static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
{
	int i = 0;

	/* Enable CI for RTI */
	if (vdev->config.intr_type == MSI_X) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct __vxge_hw_ring *hw_ring;

			hw_ring = vdev->vpaths[i].ring.handle;
			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
		}
	}

	/* Enable CI for TTI */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
		vxge_hw_vpath_tti_ci_set(hw_fifo);
		/*
		 * For Inta (with or without napi), Set CI ON for only one
		 * vpath. (Have only one free running timer).
		 */
		if ((vdev->config.intr_type == INTA) && (i == 0))
			break;
	}
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		netif_carrier_off(vdev->ndev);

		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		netif_carrier_on(vdev->ndev);

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_wait_receive_idle(vdev->devh);
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
static void vxge_reset(struct work_struct *work)
{
	struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);

	if (!netif_running(vdev->ndev))
		return;

	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
	int pkts_processed;
	int budget_org = budget;

	ring->budget = budget;
	ring->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(ring->handle);
	pkts_processed = ring->pkts_processed;

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);

		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
		mmiowb();
	}

	/* We are copying and returning the local variable, in case if after
	 * clearing the msix interrupt above, if the interrupt fires right
	 * away which can preempt this NAPI thread */
	return pkts_processed;
}
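
/*
 * Note on vxge_poll_msix(): the standard NAPI contract is followed here -
 * the ring's Rx vector stays masked while work remains, and only when
 * fewer than 'budget' packets were processed is napi_complete() called and
 * the vector unmasked, at which point a fresh interrupt can fire. The
 * count is copied into a local before unmasking because a new interrupt
 * may preempt this thread and change ring->pkts_processed.
 */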

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		ring->pkts_processed = 0;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	vxge_hw_device_clear_tx_rx(hldev);
	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping  */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
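
/*
 * Note on vxge_rth_configure(): with rth_bkt_sz = 2 (a four-bucket
 * indirection table) and no_of_vpath = 3, the fill loop above produces
 * itable = {0, 1, 2, 3} and mtable = {0, 1, 2, 0}, i.e. hash buckets are
 * dealt round-robin across the vpaths. Only the lowest-numbered vpath is
 * consulted by the hardware when steering, but the hash-type config is
 * replicated to every vpath because itable_set() keys off the active_table
 * field of each one.
 */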

/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d", i);
				return VXGE_HW_FAIL;
			}
		}
	}

	return status;
}

/* close vpaths */
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}

/* open vpaths */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		vxge_assert(vpath->is_configured);

		if (!vdev->titan1) {
			struct vxge_hw_vp_config *vcfg;
			vcfg = &vdev->devh->config.vp_config[vpath->device_id];

			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
		}

		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;

		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;
			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->fifo.tx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
					"open with status: %d",
					vdev->ndev->name, vpath->device_id,
					status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}

	return VXGE_HW_OK;
}

/**
 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
 * if the interrupts are not within a range
 * @fifo: pointer to transmit fifo structure
 * Description: The function changes boundary timer and restriction timer
 * values depending on the traffic
 * Return Value: None
 */
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
	fifo->interrupt_count++;
	if (jiffies > fifo->jiffies + HZ / 100) {
		struct __vxge_hw_fifo *hw_fifo = fifo->handle;

		fifo->jiffies = jiffies;
		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		} else if (hw_fifo->rtimer != 0) {
			hw_fifo->rtimer = 0;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		}
		fifo->interrupt_count = 0;
	}
}
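
/*
 * Note on the adaptive scheme above: the interrupt rate is sampled roughly
 * every HZ/100 jiffies (about 10 ms). If more than
 * VXGE_T1A_MAX_TX_INTERRUPT_COUNT interrupts arrived in that window, the
 * restriction timer is raised to VXGE_TTI_RTIMER_ADAPT_VAL to throttle
 * further interrupts; once the rate drops back under the threshold the
 * timer is returned to 0. The Rx variant below applies the same hysteresis
 * to the ring's RTI restriction timer.
 */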

/**
 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
 * if the interrupts are not within a range
 * @ring: pointer to receive ring structure
 * Description: The function increases or decreases the packet counts within
 * the ranges of traffic utilization, if the interrupts due to this ring are
 * not within a fixed range.
 * Return Value: Nothing
 */
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
	ring->interrupt_count++;
	if (jiffies > ring->jiffies + HZ / 100) {
		struct __vxge_hw_ring *hw_ring = ring->handle;

		ring->jiffies = jiffies;
		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
		    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
			hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
		} else if (hw_ring->rtimer != 0) {
			hw_ring->rtimer = 0;
			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
		}
		ring->interrupt_count = 0;
	}
}

/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_HANDLED;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d  Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d  Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}

#ifdef CONFIG_PCI_MSI

static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	adaptive_coalesce_tx_interrupts(fifo);

	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
				  fifo->tx_vector_no);

	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
				   fifo->tx_vector_no);

	VXGE_COMPLETE_VPATH_TX(fifo);

	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
				    fifo->tx_vector_no);

	mmiowb();

	return IRQ_HANDLED;
}
static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	adaptive_coalesce_rx_interrupts(ring);

	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
				  ring->rx_vector_no);

	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
				   ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}
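/* Editorial note: both MSI-X handlers above follow the same
 * mask -> clear -> service -> unmask discipline.  Masking first keeps the
 * one-shot vector from re-firing mid-service; clearing drops the pending
 * bit so only events arriving after this point raise a new interrupt once
 * the vector is unmasked (the Rx path presumably unmasks later, from the
 * NAPI poll, once its budget completes).
 */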
static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		/* Reduce the chance of losing alarm interrupts by masking
		 * the vector. A pending bit will be set if an alarm is
		 * generated and on unmask the interrupt will be fired.
		 */
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
						  msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;
	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
				GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_entries_failed;
	}

	vdev->vxge_entries = kcalloc(vdev->intr_cnt,
				     sizeof(struct vxge_msix_entry),
				     GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
				VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_vxge_entries_failed;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
			ret = -ENODEV;
			goto enable_msix_failed;
		}

		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;

		/* Try with a smaller number of vectors by reducing the
		 * vpath count */
		temp = (ret - 1) / 2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	} else if (ret < 0) {
		ret = -ENODEV;
		goto enable_msix_failed;
	}
	return 0;

enable_msix_failed:
	kfree(vdev->vxge_entries);
alloc_vxge_entries_failed:
	kfree(vdev->entries);
alloc_entries_failed:
	return ret;
}
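/* Editorial note: the retry above relies on the classic pci_enable_msix()
 * contract -- 0 means all requested vectors were granted, a negative value
 * is a hard error, and a positive value reports how many vectors the
 * platform could actually provide.  With two data vectors per vpath plus
 * one alarm vector, temp = (ret - 1) / 2 is the largest vpath count that
 * fits in the available vectors before jumping back to "start".
 */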
static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4] = {0, 1, 0, 0};

	vdev->intr_cnt = 0;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct vxge_vpath *vpath = &vdev->vpaths[i];

			/* If fifo or ring are not enabled, the MSIX vector for
			 * it should be set to 0.
			 */
			vpath->ring.rx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE) + 1;

			vpath->fifo.tx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE);

			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
					       VXGE_ALARM_MSIX_ID);
		}
	}

	return ret;
}
static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif
static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, vdev);
	}
}
static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		vxge_debug_init(VXGE_ERR,
			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
		vdev->config.intr_type = INTA;
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA"
					, vdev->ndev->name);
				goto INTA_MODE;
			}

			if (irq_req) {
				/* We requested this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				msix_idx += vdev->vpaths[vp_idx].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					msix_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
			    (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->no_of_vpath * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->ndev->name,
			vdev->entries[intr_cnt].entry,
			pci_fun);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
					vxge_alarm_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[0]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			vdev->config.intr_type = INTA;
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA",
				vdev->ndev->name);
			goto INTA_MODE;
		}

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					msix_idx);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
	}
INTA_MODE:
#endif

	if (vdev->config.intr_type == INTA) {
		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
			"%s:vxge:INTA", vdev->ndev->name);
		vxge_hw_device_set_intr_type(vdev->devh,
			VXGE_HW_INTR_MODE_IRQLINE);

		vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);

		ret = request_irq((int) vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
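/* Editorial note: the MSI-X vector map built above is
 *
 *	base = vpath device_id * VXGE_HW_VPATH_MSIX_ACTIVE
 *	base + 0                  -> Tx fifo handler
 *	base + 1                  -> Rx ring handler (NAPI)
 *	base + VXGE_ALARM_MSIX_ID -> one shared alarm handler
 *
 * which is why vdev->intr_cnt is sized as no_of_vpath * 2 data vectors
 * plus one alarm vector in vxge_alloc_msix().
 */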
static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	struct vxge_ring *ring;
	int i;
	unsigned long rx_frms;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;

		/* Truncated to machine word size number of frames */
		rx_frms = ACCESS_ONCE(ring->stats.rx_frms);

		/* Did this vpath receive any packets? */
		if (ring->stats.prev_rx_frms == rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time? */
			if ((VXGE_HW_FAIL == status) &&
			    (VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {
					vpath = &vdev->vpaths[i];

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					netif_tx_stop_queue(vpath->fifo.txq);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = rx_frms;
		ring->last_status = status;
	}

	/* Check every 1 milli second */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
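/* Editorial note: the lockup detector above is a two-strike heuristic.
 * A vpath is only scheduled for reset when its rx frame count did not move
 * since the previous poll *and* vxge_hw_vpath_check_leak() returned
 * VXGE_HW_FAIL on two consecutive polls (ring->last_status remembers the
 * previous verdict).
 */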
static u32 vxge_fix_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	/* Enabling RTH requires some of the logic in vxge_device_register and a
	 * vpath reset. Due to these restrictions, only allow modification
	 * while the interface is down.
	 */
	if ((changed & NETIF_F_RXHASH) && netif_running(dev))
		features ^= NETIF_F_RXHASH;

	return features;
}
static int vxge_set_features(struct net_device *dev, u32 features)
{
	struct vxgedev *vdev = netdev_priv(dev);
	u32 changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXHASH))
		return 0;

	/* !netif_running() ensured by vxge_fix_features() */

	vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
	if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
		dev->features = features ^ NETIF_F_RXHASH;
		vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
		return -EIO;
	}

	return 0;
}

/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	int ret = 0;
	int i;
	u64 val64, function_mode;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time Nic is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vpath->ring.napi_p = &vdev->napi;
		}
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			netif_napi_add(dev, &vpath->ring.napi,
				vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}
	printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
	       hldev->config.rth_en ? "enabled" : "disabled");

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
	/* Restore the DA, VID table and also multicast and promiscuous mode
	 * states
	 */
	if (vdev->all_multi_flg) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_restore_vpath_mac_addr(vpath);
			vxge_restore_vpath_vid_table(vpath);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling multicast failed",
					__func__, __LINE__);
		}
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		status = vxge_hw_vpath_bcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ / 2));

	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
			HZ / 2);

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_hw_vpath_enable(vpath->handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	}

	netif_tx_start_all_queues(vdev->ndev);

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d  Exiting...",
				dev->name, __func__, __LINE__);
	return ret;
}
/* Loop through the mac address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}
static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}
static int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);
		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}

	if (vdev->titan1)
		del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	if (do_io)
		vxge_hw_device_wait_receive_idle(hldev);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	/* Open the device */
	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d  Exiting...", __func__, __LINE__);

	return 0;
}

/**
 * vxge_get_stats64
 * @dev: pointer to the device structure
 * @stats: pointer to struct rtnl_link_stats64
 *
 */
static struct rtnl_link_stats64 *
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int k;

	/* net_stats already zeroed by caller */
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
		struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
		unsigned int start;
		u64 packets, bytes, multicast;

		do {
			start = u64_stats_fetch_begin(&rxstats->syncp);

			packets   = rxstats->rx_frms;
			multicast = rxstats->rx_mcast;
			bytes     = rxstats->rx_bytes;
		} while (u64_stats_fetch_retry(&rxstats->syncp, start));

		net_stats->rx_packets += packets;
		net_stats->rx_bytes += bytes;
		net_stats->multicast += multicast;

		net_stats->rx_errors += rxstats->rx_errors;
		net_stats->rx_dropped += rxstats->rx_dropped;

		do {
			start = u64_stats_fetch_begin(&txstats->syncp);

			packets = txstats->tx_frms;
			bytes   = txstats->tx_bytes;
		} while (u64_stats_fetch_retry(&txstats->syncp, start));

		net_stats->tx_packets += packets;
		net_stats->tx_bytes += bytes;
		net_stats->tx_errors += txstats->tx_errors;
	}

	return net_stats;
}
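/* Illustrative sketch (editorial addition): the fetch/retry loops above
 * pair with writer-side update sections elsewhere in the driver that look
 * roughly like the hypothetical helper below, keeping 64-bit counters
 * coherent on 32-bit SMP hosts without locking the fast path.
 */
static inline void vxge_stats_rx_account(struct vxge_ring_stats *stats,
					 u64 bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_frms++;
	stats->rx_bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}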
static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
{
	enum vxge_hw_status status;
	u64 val64;

	/* Timestamp is passed to the driver via the FCS, therefore we
	 * must disable the FCS stripping by the adapter.  Since this is
	 * required for the driver to load (due to a hardware bug),
	 * there is no need to do anything special here.
	 */
	val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
		VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
		VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);

	status = vxge_hw_mgmt_reg_write(devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					offsetof(struct vxge_hw_mrpcim_reg,
						 xmac_timestamp),
					val64);
	vxge_hw_device_flush_io(devh);
	devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
	return status;
}
static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
{
	struct hwtstamp_config config;
	int i;

	if (copy_from_user(&config, data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* Transmit HW Timestamp not supported */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ON:
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		vdev->rx_hwts = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
			return -EFAULT;

		vdev->rx_hwts = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;

	default:
		return -ERANGE;
	}

	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;

	if (copy_to_user(data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
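/* Usage sketch (editorial addition): userspace reaches the handler above
 * through the standard SIOCSHWTSTAMP ioctl, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * "eth0" and fd are placeholders; any AF_INET datagram socket will do.
 */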
/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *       a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *       can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	schedule_work(&vdev->reset_task);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the device's vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath;
	int vp_id;

	/* Add this vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
	set_bit(vid, vdev->active_vlans);
}
/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
	clear_bit(vid, vdev->active_vlans);
}
static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open		= vxge_open,
	.ndo_stop		= vxge_close,
	.ndo_get_stats64	= vxge_get_stats64,
	.ndo_start_xmit		= vxge_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= vxge_set_multicast,
	.ndo_do_ioctl		= vxge_ioctl,
	.ndo_set_mac_address	= vxge_set_mac_addr,
	.ndo_change_mtu		= vxge_change_mtu,
	.ndo_fix_features	= vxge_fix_features,
	.ndo_set_features	= vxge_set_features,
	.ndo_vlan_rx_kill_vid	= vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid	= vxge_vlan_rx_add_vid,
	.ndo_tx_timeout		= vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= vxge_netpoll,
#endif
};
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
					  struct vxge_config *config,
					  int high_dma, int no_of_vpath,
					  struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_hwts = 0;
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_HW_VLAN_TX;
	if (vdev->config.rth_steering != NO_STEERING)
		ndev->hw_features |= NETIF_F_RXHASH;

	ndev->features |= ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
	INIT_WORK(&vdev->reset_task, vxge_reset);

	vxge_initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENOMEM;
		goto _out1;
	}

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ret = register_netdev(ndev);
	if (ret) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		goto _out2;
	}

	/* Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	hldev->ndev = ndev;
	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
				hldev,
				VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
				0,
				0,
				&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}

/*
 * vxge_device_unregister
 *
 * This function will unregister and free the network device
 */
static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];

	dev = hldev->ndev;
	vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
			     __func__, __LINE__);

	strncpy(buf, dev->name, IFNAMSIZ);

	flush_work_sync(&vdev->reset_task);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	kfree(vdev->vpaths);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
			buf);
	vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
			     __func__, __LINE__);
}

/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
			enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath = NULL;
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->device_id == vp_id)
			break;
	}

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
	    (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		   (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				netif_tx_stop_queue(vpath->fifo.txq);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d  Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				  (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
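/* Worked example (editorial addition, assuming VXGE_HW_VPATH_BANDWIDTH_MAX
 * is 100 as the "sum exceeds 100" comment above implies): loading with
 * bw_percentage=40,30 leaves total = 70 when the first defaulted (0xFF)
 * entry is hit at i = 2, so the remaining 15 vpaths each get
 * (100 - 70) / 15 = 2 -- exactly the 2% floor; any split that would fall
 * below 2% reverts every vpath to equal priority instead.
 */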
/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
	    (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPUs. Return vpath number as zero. */
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;

		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}
		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
				MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector has a handler of its own) -
		 * Set CI to OFF for all vpaths
		 */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
				RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
				RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
				RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}
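/* Editorial note (hedged): the recurring "(val * 1000) / 272" expressions
 * above appear to convert the TTI/RTI timer module parameters from
 * microseconds into TIM hardware ticks of roughly 272 ns each
 * (val us * 1000 ns / 272 ns per tick); a 250 us boundary timer, for
 * instance, would program about 919 ticks.
 */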
/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
		break;
	}

	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}
static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
	}
}

/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			netdev_err(netdev,
				   "Can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static inline u32 vxge_get_num_vfs(u64 function_mode)
{
	u32 num_functions = 0;

	switch (function_mode) {
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
		num_functions = 8;
		break;
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		num_functions = 1;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
	case VXGE_HW_FUNCTION_MODE_MRIOV:
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
		num_functions = 17;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
		num_functions = 4;
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
		num_functions = 2;
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
		num_functions = 8; /* TODO */
		break;
	}
	return num_functions;
}
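/* Editorial note: callers treat this as the total function count for the
 * mode; vxge_probe() later computes
 * num_vfs = vxge_get_num_vfs(function_mode) - 1, since one function is the
 * PF itself and only the remainder are handed to pci_enable_sriov().
 */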
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
{
	struct __vxge_hw_device *hldev = vdev->devh;
	u32 maj, min, bld, cmaj, cmin, cbld;
	enum vxge_hw_status status;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
	if (ret) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
				VXGE_DRIVER_NAME, fw_name);
		goto out;
	}

	/* Load the new firmware onto the adapter */
	status = vxge_update_fw_image(hldev, fw->data, fw->size);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: FW image download to adapter failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	/* Read the version of the new firmware */
	status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: Upgrade read version failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	cmaj = vdev->config.device_hw_info.fw_version.major;
	cmin = vdev->config.device_hw_info.fw_version.minor;
	cbld = vdev->config.device_hw_info.fw_version.build;
	/* It's possible the version in /lib/firmware is not the latest version.
	 * If so, we could get into a loop of trying to upgrade to the latest
	 * and flashing the older version.
	 */
	if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
	    !override) {
		ret = -EINVAL;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
	       maj, min, bld);

	/* Flash the adapter with the new firmware */
	status = vxge_hw_flash_fw(hldev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
	       "hard reset before using, thus requiring a system reboot or a "
	       "hotplug event.\n");

out:
	release_firmware(fw);
	return ret;
}
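/* Usage note (editorial addition): request_firmware() resolves fw_name
 * ("vxge/X3fw.ncf" or "vxge/X3fw-pxe.ncf", chosen by the caller below)
 * through the kernel firmware loader, which conventionally searches
 * /lib/firmware.  release_firmware() tolerates a NULL fw, so the error
 * path can share the single "out:" exit above.
 */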
static int vxge_probe_fw_update(struct vxgedev *vdev)
{
	int ret, gpxe = 0;
	u32 maj, min, bld;
	char *fw_name;

	maj = vdev->config.device_hw_info.fw_version.major;
	min = vdev->config.device_hw_info.fw_version.minor;
	bld = vdev->config.device_hw_info.fw_version.build;

	if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
		return 0;

	/* Ignore the build number when determining if the current firmware is
	 * "too new" to load the driver
	 */
	if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
				"version, unable to load driver\n",
				VXGE_DRIVER_NAME);
		return -EINVAL;
	}

	/* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
	 * work with this driver.
	 */
	if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
				"upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	/* If file not specified, determine gPXE or not */
	if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
		int i;
		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
			if (vdev->devh->eprom_versions[i]) {
				gpxe = 1;
				break;
			}
	}
	if (gpxe)
		fw_name = "vxge/X3fw-pxe.ncf";
	else
		fw_name = "vxge/X3fw.ncf";

	ret = vxge_fw_upgrade(vdev, fw_name, 0);
	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
	 * probe, so ignore them
	 */
	if (ret != -EINVAL && ret != -ENOENT)
		return -EINVAL;
	else
		ret = 0;

	if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
	    VXGE_FW_VER(maj, min, 0)) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
				" be used with this driver.\n"
				"Please get the latest version from "
				"ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
				VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	return ret;
}
static int __devinit is_sriov_initialized(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
		if (ctrl & PCI_SRIOV_CTRL_VFE)
			return 1;
	}
	return 0;
}
static const struct vxge_hw_uld_cbs vxge_callbacks = {
	.link_up = vxge_callback_link_up,
	.link_down = vxge_callback_link_down,
	.crit_err = vxge_callback_crit_err,
};
/**
 * vxge_probe
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status;
	int ret;
	int high_dma = 0;
	u64 vpath_mask = 0;
	struct vxgedev *vdev;
	struct vxge_config *ll_config = NULL;
	struct vxge_hw_device_config *device_config = NULL;
	struct vxge_hw_device_attr attr;
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
	u8 *macaddr;
	struct vxge_mac_addrs *entry;
	static int bus = -1, device = -1;
	u32 host_type;
	u32 function_mode;
	enum vxge_hw_status is_privileged;
	u32 num_vfs = 0;
	int new_device = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
	attr.pdev = pdev;

	/* In SRIOV-17 mode, functions of the same adapter
	 * can be deployed on different buses
	 */
	if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
	    !pdev->is_virtfn)
		new_device = 1;

	bus = pdev->bus->number;
	device = PCI_SLOT(pdev->devfn);

	if (new_device) {
		if (driver_config->config_dev_cnt &&
		   (driver_config->config_dev_cnt !=
			driver_config->total_dev_cnt))
			vxge_debug_init(VXGE_ERR,
				"%s: Configured %d of %d devices",
				VXGE_DRIVER_NAME,
				driver_config->config_dev_cnt,
				driver_config->total_dev_cnt);
		driver_config->config_dev_cnt = 0;
		driver_config->total_dev_cnt = 0;
	}

	/* Now making the CPU based no of vpath calculation
	 * applicable for individual functions as well.
	 */
	driver_config->g_no_cpus = 0;
	driver_config->vpath_per_dev = max_config_vpath;

	driver_config->total_dev_cnt++;
	if (++driver_config->config_dev_cnt > max_config_dev) {
		ret = 0;
		goto _exit0;
	}
	device_config = kzalloc(sizeof(struct vxge_hw_device_config),
		GFP_KERNEL);
	if (!device_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}

	ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
	if (!ll_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}
	ll_config->tx_steering_type = TX_MULTIQ_STEERING;
	ll_config->intr_type = MSI_X;
	ll_config->napi_weight = NEW_NAPI_WEIGHT;
	ll_config->rth_steering = RTH_STEERING;

	/* get the default configuration parameters */
	vxge_hw_device_config_default_get(device_config);

	/* initialize configuration parameters */
	vxge_device_config_init(device_config, &ll_config->intr_type);

	ret = pci_enable_device(pdev);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : can not enable PCI device", __func__);
		goto _exit0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 64bit DMA", __func__);

		high_dma = 1;

		if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(64))) {
			vxge_debug_init(VXGE_ERR,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);
			ret = -ENOMEM;
			goto _exit1;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 32bit DMA", __func__);
	} else {
		ret = -ENOMEM;
		goto _exit1;
	}

	ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : request regions failed", __func__);
		goto _exit1;
	}

	pci_set_master(pdev);

	attr.bar0 = pci_ioremap_bar(pdev, 0);
	if (!attr.bar0) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot remap io memory bar0", __func__);
		ret = -ENODEV;
		goto _exit2;
	}
	vxge_debug_ll_config(VXGE_TRACE,
		"pci ioremap bar0: %p:0x%llx",
		attr.bar0,
		(unsigned long long)pci_resource_start(pdev, 0));
	status = vxge_hw_device_hw_info_get(attr.bar0,
			&ll_config->device_hw_info);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: Reading of hardware info failed. "
			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vpath_mask = ll_config->device_hw_info.vpath_mask;
	if (vpath_mask == 0) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vxge_debug_ll_config(VXGE_TRACE,
		"%s:%d  Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

	function_mode = ll_config->device_hw_info.function_mode;
	host_type = ll_config->device_hw_info.host_type;
	is_privileged = __vxge_hw_device_is_privilaged(host_type,
		ll_config->device_hw_info.func_id);

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		max_vpath_supported++;
	}

	if (new_device)
		num_vfs = vxge_get_num_vfs(function_mode) - 1;

	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
	if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
	    (ll_config->intr_type != INTA)) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			vxge_debug_ll_config(VXGE_ERR,
				"Failed in enabling SRIOV mode: %d\n", ret);
			/* No need to fail out, as an error here is non-fatal */
	}

	/*
	 * Configure vpaths and get driver configured number of vpaths
	 * which is less than or equal to the maximum vpaths per function.
	 */
	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
	if (!no_of_vpath) {
		vxge_debug_ll_config(VXGE_ERR,
			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
		ret = 0;
		goto _exit3;
	}

	/* Setting driver callbacks */
	attr.uld_callbacks = &vxge_callbacks;

	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"Failed to initialize device (%d)", status);
		ret = -EINVAL;
		goto _exit3;
	}

	if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
			ll_config->device_hw_info.fw_version.minor,
			ll_config->device_hw_info.fw_version.build) >=
	    VXGE_EPROM_FW_VER) {
		struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];

		status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
					VXGE_DRIVER_NAME);
			/* This is a non-fatal error, continue */
		}

		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
			hldev->eprom_versions[i] = img[i].version;
			if (!img[i].is_valid)
				break;
			vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
					"%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
					VXGE_EPROM_IMG_MAJOR(img[i].version),
					VXGE_EPROM_IMG_MINOR(img[i].version),
					VXGE_EPROM_IMG_FIX(img[i].version),
					VXGE_EPROM_IMG_BUILD(img[i].version));
		}
	}

	/* if FCS stripping is not disabled in the MAC, fail the driver load */
	status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
				" failing driver load", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit4;
	}

	/* Always enable HWTS.  This will always cause the FCS to be invalid,
	 * because HWTS uses the FCS field as the location of the timestamp.
	 * The HW FCS checking will still correctly determine whether there
	 * is a valid checksum, and the FCS is being removed by the driver
	 * anyway, so no functionality is lost.  Since it is always enabled,
	 * we simply use the ioctl call to set whether or not the driver
	 * should be paying attention to the HWTS.
	 */
	if (is_privileged == VXGE_HW_OK) {
		status = vxge_timestamp_config(hldev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
					VXGE_DRIVER_NAME);
			ret = -EFAULT;
			goto _exit4;
		}
	}
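
	/*
	 * For reference, a minimal userspace sketch (not part of this file;
	 * the socket fd and interface name are illustrative) that flips
	 * whether the driver honors the always-on HWTS, via the standard
	 * SIOCSHWTSTAMP ioctl from <linux/net_tstamp.h>:
	 *
	 *	struct hwtstamp_config cfg = {
	 *		.tx_type   = HWTSTAMP_TX_OFF,
	 *		.rx_filter = HWTSTAMP_FILTER_ALL,
	 *	};
	 *	struct ifreq ifr;
	 *
	 *	memset(&ifr, 0, sizeof(ifr));
	 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	 *	ifr.ifr_data = (void *)&cfg;
	 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
	 */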

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

	/* set private device info */
	pci_set_drvdata(pdev, hldev);

	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config->addr_learn_en = addr_learn_en;
	ll_config->rth_algorithm = RTH_ALG_JENKINS;
	ll_config->rth_hash_type_tcpipv4 = 1;
	ll_config->rth_hash_type_ipv4 = 0;
	ll_config->rth_hash_type_tcpipv6 = 0;
	ll_config->rth_hash_type_ipv6 = 0;
	ll_config->rth_hash_type_tcpipv6ex = 0;
	ll_config->rth_hash_type_ipv6ex = 0;
	ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
	ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
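
	/*
	 * RTH (Receive Traffic Hashing) is this device's receive-side
	 * scaling. With the defaults above, only the TCP/IPv4 4-tuple feeds
	 * the Jenkins hash; the hash then indexes an RTH_BUCKET_SIZE bucket
	 * table that steers each flow to a vpath.
	 */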

	ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
				   &vdev);
	if (ret) {
		ret = -EINVAL;
		goto _exit4;
	}

	ret = vxge_probe_fw_update(vdev);
	if (ret)
		goto _exit5;

	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	/* set private HW device info */
	vdev->mtu = VXGE_HW_DEFAULT_MTU;
	vdev->bar0 = attr.bar0;
	vdev->max_vpath_supported = max_vpath_supported;
	vdev->no_of_vpath = no_of_vpath;

	/* Virtual Path count */
	for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].device_id = i;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
		memcpy((u8 *)vdev->vpaths[j].macaddr,
				ll_config->device_hw_info.mac_addrs[i],
				ETH_ALEN);

		/* Initialize the mac address list header */
		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;
		j++;
	}

	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
	vdev->max_config_port = max_config_port;

	vdev->vlan_tag_strip = vlan_tag_strip;

	/* map the hashing selector table to the configured vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpath_selector[i] = vpath_selector[i];
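
	/*
	 * Each vpath_selector[] entry (0, 1, 3, 3, 7, ...) appears to be the
	 * smallest power-of-two-minus-one mask covering that many vpaths, so
	 * the RTH result can be folded onto the configured vpaths with a
	 * cheap AND rather than a modulo.
	 */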

	macaddr = (u8 *)vdev->vpaths[0].macaddr;

	ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config->device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
		vdev->ndev->name, macaddr);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config->device_hw_info.fw_version.version,
		ll_config->device_hw_info.fw_date.date);

	switch (ll_config->device_hw_info.function_mode) {
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
		break;
	}

	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);

	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -ENOMEM;
			goto _exit6;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}
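
	/*
	 * Every vpath's filter list starts with a single entry: the station
	 * MAC address that was just programmed into the net device.
	 */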

	kfree(device_config);

	/*
	 * INTA is shared in multi-function mode. This is unlike the INTA
	 * implementation in MR mode, where each VH has its own INTA message.
	 * - INTA is masked (disabled) as long as at least one function sets
	 *   its TITAN_MASK_ALL_INT.ALARM bit.
	 * - INTA is unmasked (enabled) when all enabled functions have cleared
	 *   their own TITAN_MASK_ALL_INT.ALARM bit.
	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
	 * Though this driver leaves the top level interrupts unmasked while
	 * leaving the required module interrupt bits masked on exit, there
	 * could be a rogue driver around that does not follow this procedure,
	 * resulting in a failure to generate interrupts. The following code is
	 * present to prevent such a failure.
	 */

	if (ll_config->device_hw_info.function_mode ==
		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
		if (vdev->config.intr_type == INTA)
			vxge_hw_device_unmask_all(hldev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	kfree(ll_config);
	return 0;
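
	/*
	 * Error paths unwind in reverse order of acquisition; each label
	 * releases one resource and falls through to the next.
	 */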

_exit6:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);
_exit5:
	vxge_device_unregister(hldev);
_exit4:
	pci_set_drvdata(pdev, NULL);
	vxge_hw_device_terminate(hldev);
	pci_disable_sriov(pdev);
_exit3:
	iounmap(attr.bar0);
_exit2:
	pci_release_region(pdev, 0);
_exit1:
	pci_disable_device(pdev);
_exit0:
	kfree(ll_config);
	kfree(device_config);
	driver_config->config_dev_cnt--;
	driver_config->total_dev_cnt--;
	return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;
	int i;

	hldev = pci_get_drvdata(pdev);
	if (hldev == NULL)
		return;

	vdev = netdev_priv(hldev->ndev);

	vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
	vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
			__func__);

	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
	pci_set_drvdata(pdev, NULL);
	/* Do not call pci_disable_sriov here, as it will break child devices */
	vxge_hw_device_terminate(hldev);
	iounmap(vdev->bar0);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	driver_config->config_dev_cnt--;
	driver_config->total_dev_cnt--;

	vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
			__func__, __LINE__);
	vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
			     __LINE__);
}
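
/*
 * PCIe error-recovery (AER) callbacks: the PCI core calls error_detected
 * when an error is reported on the link, slot_reset after the slot has been
 * reset, and resume once traffic may restart.
 */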

static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};

static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};

static int __init
vxge_starter(void)
{
	int ret = 0;
pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4828
pr_info("Driver version: %s\n", DRV_VERSION);
4832
driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4836
ret = pci_register_driver(&vxge_driver);
4838
kfree(driver_config);
4842

	if (driver_config->config_dev_cnt &&
	    (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);
err:
	return ret;
}

static void __exit
vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}

module_init(vxge_starter);
module_exit(vxge_closer);
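
/*
 * A minimal load/unload sketch (assuming the module is built as vxge.ko):
 *
 *	# modprobe vxge
 *	# rmmod vxge
 *
 * module_init() runs vxge_starter() at load, registering the PCI driver so
 * vxge_probe() is called for each matching adapter; module_exit() runs
 * vxge_closer() at unload, unregistering the driver and thereby invoking
 * vxge_remove() on each bound device.
 */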