/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {			\
	status = __vxge_hw_vpath_stats_access(vpath,		\
			VXGE_HW_STATS_OP_READ,			\
			offset,					\
			&val64);				\
	if (status != VXGE_HW_OK)				\
		return status;					\
}
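/*
 * Usage sketch (not additional driver logic): the macro above expects
 * `vpath', `status' and `val64' to be in scope in the caller, e.g.
 *
 *	u64 val64 = 0;
 *	enum vxge_hw_status status;
 *
 *	VXGE_HW_VPATH_STATS_PIO_READ(
 *		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
 *
 * as done in __vxge_hw_vpath_stats_get() below.
 */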
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);
}
/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_virtualpath *vpath;
	u64 val64, rxd_count, rxd_spat;
	int count = 0, total_count = 0;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;

	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

	/* Check that the ring controller for this vpath has enough free RxDs
	 * to send frames to the host. This is done by reading the
	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
	 * RXD_SPAT value for the vpath.
	 */
	val64 = readq(&vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
	 * headroom.
	 */
	rxd_spat *= 2;

	do {
		mdelay(1);
		total_count++;

		rxd_count = readq(&vp_reg->prc_rxd_doorbell);

		/* Check that the ring controller for this vpath does
		 * not have any frame in its pipeline.
		 */
		val64 = readq(&vp_reg->frm_in_progress_cnt);
		if ((rxd_count <= rxd_spat) || (val64 > 0))
			count = 0;
		else
			count++;
	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
		 (total_count < VXGE_HW_MAX_POLLING_COUNT));

	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
			__func__);

	return total_count;
}
/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
	int i, total_count = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
			break;
	}
}
/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Will poll until masked bit is not cleared.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
			  u64 mask, u32 max_millis)
{
	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();
	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
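/*
 * Note on the helper above: a 64-bit command register is written as two
 * 32-bit PIO writes (lower word first, upper word last), and completion
 * is then detected by polling until the hardware clears the mask bit.
 * A typical call, as used throughout this file, looks like:
 *
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vp_reg->xmac_stats_access_cmd,
 *			VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */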
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	enum vxge_hw_status status;
	u64 val64;
	u32 retry = 0, max_retry = 3;

	spin_lock(&vpath->lock);
	if (!vpath->vp_open) {
		spin_unlock(&vpath->lock);
		max_retry = 100;
	}

	writeq(*data0, &vp_reg->rts_access_steer_data0);
	writeq(*data1, &vp_reg->rts_access_steer_data1);
	wmb();

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		*steer_ctrl;

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	/* The __vxge_hw_device_register_poll can udelay for a significant
	 * amount of time, blocking other processes from the CPU. If it delays
	 * for ~5 secs, an NMI error can occur. A way around this is to give up
	 * the processor via msleep, but this is not allowed while holding a
	 * lock. So, only allow it to sleep for ~4 secs if open. Otherwise,
	 * delay for 1 sec and sleep for 10 ms until the firmware operation
	 * has completed or timed out.
	 */
	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
		if (!vpath->vp_open)
			msleep(10);
		status = __vxge_hw_device_register_poll(
				&vp_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	}

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);
	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		*data0 = readq(&vp_reg->rts_access_steer_data0);
		*data1 = readq(&vp_reg->rts_access_steer_data1);
		*steer_ctrl = val64;
	} else
		status = VXGE_HW_FAIL;

exit:
	if (vpath->vp_open)
		spin_unlock(&vpath->lock);
	return status;
}
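/*
 * Note: data0/data1 and steer_ctrl are in/out parameters of the firmware
 * API above; they carry the request payload in and the response payload
 * out. The canonical call pattern, used by all callers below, is:
 *
 *	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 *
 *	status = vxge_hw_vpath_fw_api(vpath, action, fw_memo, offset,
 *				      &data0, &data1, &steer_ctrl);
 */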
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
exit:
	return status;
}
enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	u32 ret;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
		goto exit;
	}

	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
	if (ret != 1) {
		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
				__func__, ret);
		status = VXGE_HW_FAIL;
	}
exit:
	return status;
}
enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int ret_code, sec_code;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	/* send upgrade start command */
	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_START,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
				__func__);
		return status;
	}

	/* Transfer fw image to adapter 16 bytes at a time */
	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
		steer_ctrl = 0;

		/* The next 128 bits of fwdata to be loaded onto the adapter */
		data0 = *((u64 *)fwdata);
		data1 = *((u64 *)fwdata + 1);

		status = vxge_hw_vpath_fw_api(vpath,
					      VXGE_HW_FW_UPGRADE_ACTION,
					      VXGE_HW_FW_UPGRADE_MEMO,
					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
					      &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
					__func__);
			goto exit;
		}

		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
		switch (ret_code) {
		case VXGE_HW_FW_UPGRADE_OK:
			/* All OK, send next 16 bytes. */
			break;
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			/* skip bytes in the stream */
			fwdata += (data0 >> 8) & 0xFFFFFFFF;
			break;
		case VXGE_HW_FW_UPGRADE_DONE:
			goto exit;
		case VXGE_HW_FW_UPGRADE_ERR:
			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
			switch (sec_code) {
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
				printk(KERN_ERR
				       "corrupted data from .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
				printk(KERN_ERR "invalid .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
				printk(KERN_ERR "buffer overflow\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
				printk(KERN_ERR "failed to flash the image\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
				printk(KERN_ERR
				       "generic error. Unknown error type\n");
				break;
			default:
				printk(KERN_ERR "Unknown error of type %d\n",
				       sec_code);
				break;
			}
			status = VXGE_HW_FAIL;
			goto exit;
		default:
			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
			status = VXGE_HW_FAIL;
			goto exit;
		}
		/* point to next 16 bytes */
		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
	}
exit:
	return status;
}
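/*
 * Caller sketch (an assumption mirroring the usual request_firmware()
 * flow, not code from this file): fwdata/size would typically come from
 * the firmware loader, e.g.
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, fw_name, &pdev->dev) == 0) {
 *		status = vxge_update_fw_image(hldev, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */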
enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *img)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int i;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_FW_API_GET_EPROM_REV,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			break;

		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
	}

	return status;
}
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}
/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks whether the vpath reset-in-progress register has
 * turned zero.
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = readq(&legacy_reg->toc_swapper_fb);

	switch (val64) {
	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	val64 = readq(&legacy_reg->toc_swapper_fb);
	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
	return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric has
 * completed initializing registers.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
			(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
			(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
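/*
 * Layout note: every TOC pointer read above is a byte offset relative to
 * BAR0, so each register block address is computed as
 * (hldev->bar0 + readq(&hldev->toc_reg->toc_<block>_pointer)).
 */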
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
			func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

		hldev->first_vp_id = i;
		break;
	}
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
				hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
			   struct vxge_hw_device_hw_info *hw_info)
{
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	fw_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
	fw_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
	fw_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 fw_date->month, fw_date->day, fw_date->year);

	fw_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	fw_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	fw_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 fw_version->major, fw_version->minor, fw_version->build);

	flash_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
	flash_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
	flash_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 flash_date->month, flash_date->day, flash_date->year);

	flash_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
	flash_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
	flash_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 flash_version->major, flash_version->minor,
		 flash_version->build);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
			      struct vxge_hw_device_hw_info *hw_info)
{
	enum vxge_hw_status status;
	u64 data0, data1 = 0, steer_ctrl = 0;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;
	u32 i, j = 0;

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)serial_number)[0] = be64_to_cpu(data0);
	((u64 *)serial_number)[1] = be64_to_cpu(data1);

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)part_number)[0] = be64_to_cpu(data0);
	((u64 *)part_number)[1] = be64_to_cpu(data1);

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
		data0 = i;
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			return status;

		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
	}

	return status;
}
/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
				  struct vxge_hw_device_hw_info *hw_info)
{
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	data0 = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_FW_API_GET_FUNC_MODE,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 * from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
			 u8 *macaddr, u8 *macaddr_mask)
{
	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
	    data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;
	int i;

	do {
		status = vxge_hw_vpath_fw_api(vpath, action,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			goto exit;

		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
							data1);

		for (i = ETH_ALEN; i > 0; i--) {
			macaddr[i - 1] = (u8) (data0 & 0xFF);
			data0 >>= 8;

			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
			data1 >>= 8;
		}

		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
		data0 = 0, data1 = 0, steer_ctrl = 0;

	} while (!is_valid_ether_addr(macaddr));
exit:
	return status;
}
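/*
 * Byte-order example for the loop above: the DA entry arrives LSB-first
 * in data0, so for data0 = 0x0000a1b2c3d4e5f6 the extracted macaddr[]
 * ends up as { 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6 }.
 */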
/*
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;
	struct __vxge_hw_virtualpath vpath;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
				hw_info->func_id) &
				VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		spin_lock_init(&vpath.lock);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

		status = __vxge_hw_vpath_addr_get(&vpath,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL)
		return;

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {
		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree((void *)p);
	}
}
/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {
		memblock = vxge_os_dma_malloc(
				hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);
		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (unlikely(pci_dma_mapping_error(hldev->pdev,
				dma_addr))) {
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =
			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				    GFP_KERNEL);
		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
				 &blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}
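/*
 * Sizing note for the pool above: pool_size + pool_max list entries are
 * pre-allocated so that later additions via vxge_hw_blockpool_block_add()
 * can normally reuse a free entry instead of allocating one.
 */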
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
	    (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
	    ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
	     (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. The driver cooperates with the
 * OS to find a new Titan device and locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = vzalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 * and offset and perform an operation
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);
	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);
	else
		*stat = 0;

vpath_stats_access_exit:
	return status;
}
/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset++;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *) vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}
/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			 struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
		    (hldev->virtual_paths[i].vp_open ==
			VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
		       hldev->virtual_paths[i].hw_stats,
		       sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
	       sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
	       sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 * and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;

	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}
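/*
 * Decode example for the helper above: the negotiated link width is
 * bits 9:4 of the PCIe Link Status register, hence the ">> 4"; a raw
 * LNKSTA value of 0x0081 therefore decodes to a x8 link.
 */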
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * Sets the next block pointer in an RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
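/*
 * Layout note for the helpers above: the trailing bytes of each RxD
 * block hold per-block metadata, so VXGE_HW_RING_MEMBLOCK_IDX_OFFSET and
 * VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET each address a u64 stored
 * inside the block itself.
 */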
/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}
/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
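/*
 * Arithmetic example for the helper above: if the owning memblock maps
 * at DMA address D and the item starts 0x800 bytes into that memblock,
 * the returned DMA address is D + 0x800.
 */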
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is a callback passed to __vxge_hw_mempool_create to create
 * the memory pool for RxD blocks
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
				i * sizeof(struct vxge_hw_ring_rxd_1);

		/* Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}
/*
 * __vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	void *rxd;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
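/*
 * Usage note (sketch): vxge_hw_ring_replenish() is intended to run once
 * after a ring is created or reset, so that every descriptor in the
 * reserve array is initialized and posted before traffic starts.
 */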
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space,
			   void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}

/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 req_out;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;

	req_out = blockpool->req_out;
exit:
	return;
}

static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
	gfp_t flags;
	void *vaddr;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;

	vaddr = kmalloc((size), flags);

	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}

/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
static void
__vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}

/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
					struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}

/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
static void
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}

/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *				__vxge_hw_blockpool_malloc
 */
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
				     void *memblock, u32 size,
				     struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}

/*
 * __vxge_hw_mempool_destroy
 */
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i, j;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			u32 index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (index >= mempool->items_current)
				break;
		}

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);
	vfree(mempool->memblocks_dma_arr);
	vfree(mempool->memblocks_priv_arr);
	vfree(mempool->memblocks_arr);

	vfree(mempool);
}

/*
 * __vxge_hw_mempool_grow
 * Will resize mempool up to %num_allocate value.
 */
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
			vzalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}

/*
 * __vxge_hw_mempool_create
 * This function will create memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 */
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
			 u32 memblock_size,
			 u32 item_size,
			 u32 items_priv_size,
			 u32 items_initial,
			 u32 items_max,
			 struct vxge_hw_mempool_cbs *mp_callback,
			 void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	mempool->devh = devh;
	mempool->memblock_size = memblock_size;
	mempool->items_max = items_max;
	mempool->items_initial = items_initial;
	mempool->item_size = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = mp_callback->item_func_alloc;
	mempool->userdata = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);
	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate hash array of items */
	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
				 mempool->items_per_memblock;

	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

exit:
	return mempool;
}
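
/*
 * Illustrative sketch, not part of the driver: the expected life cycle of a
 * mempool created by the function above. The sizes here are arbitrary and
 * "cbs" is assumed to carry a valid item_func_alloc; the real callers are
 * __vxge_hw_ring_create() and __vxge_hw_fifo_create() below.
 */
#if 0	/* example only -- not built */
static void example_mempool_usage(struct __vxge_hw_device *devh,
				  struct vxge_hw_mempool_cbs *cbs)
{
	struct vxge_hw_mempool *pool;

	/* 4 KiB memblocks, 256-byte items with 64 bytes of private space,
	 * 128 items allocated up front, growable to 512 items */
	pool = __vxge_hw_mempool_create(devh, 4096, 256, 64, 128, 512,
					cbs, NULL);
	if (pool == NULL)
		return;

	/* ... items are now reachable through pool->items_arr[] ... */

	__vxge_hw_mempool_destroy(pool);
}
#endif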

/*
 * __vxge_hw_ring_abort - Returns the RxD
 * This function terminates the RxDs of ring
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);
	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);
	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}

/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;

		device_config->vp_config[i].ring.ring_blocks =
				VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;

		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;

		device_config->vp_config[i].fifo.fifo_blocks =
				VXGE_HW_MIN_FIFO_BLOCKS;

		device_config->vp_config[i].fifo.max_frags =
				VXGE_HW_MAX_FIFO_FRAGS;

		device_config->vp_config[i].fifo.memblock_size =
				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;

		device_config->vp_config[i].fifo.alignment_size =
				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;

		device_config->vp_config[i].fifo.no_snoop_bits =
				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
		device_config->vp_config[i].tti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].tti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].rti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
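
/*
 * Illustrative sketch, not part of the driver: callers are expected to load
 * the defaults above and then override individual fields before handing the
 * config to device initialization. The two overrides shown are arbitrary
 * examples.
 */
#if 0	/* example only -- not built */
static void example_config_usage(struct vxge_hw_device_config *cfg)
{
	vxge_hw_device_config_default_get(cfg);

	cfg->intr_mode = VXGE_HW_INTR_MODE_MSIX;	/* prefer MSI-X */
	cfg->vp_config[0].mtu = 1500;			/* fixed MTU on vpath 0 */
}
#endif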

/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);
		wmb();

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
		wmb();
	}

	return VXGE_HW_OK;
}

/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}
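
/*
 * Illustrative sketch, not part of the driver: reading one 64-bit word from
 * the common register space through the type- and bounds-checked accessor
 * above. The wrapper name is hypothetical.
 */
#if 0	/* example only -- not built */
static u64 example_read_common_reg(struct __vxge_hw_device *hldev, u32 offset)
{
	u64 val = 0;

	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
				  0, offset, &val) != VXGE_HW_OK)
		return 0;

	return val;
}
#endif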

/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}

/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		       enum vxge_hw_mgmt_reg_type type,
		       u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
			offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}

/*
 * __vxge_hw_fifo_abort - Returns the TxD
 * This function terminates the TxDs of fifo
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
			VXGE_HW_TXDL_STATE_POSTED,
			fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;
	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * list callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
		(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	vxge_assert(txdp);

	txdp->host_control = (u64) (size_t)
	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}

/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates FIFO and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_fifo_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_fifo *fifo;
	struct vxge_hw_fifo_config *config;
	u32 txdl_size, txdl_per_memblock;
	struct vxge_hw_mempool_cbs fifo_mp_callback;
	struct __vxge_hw_virtualpath *vpath;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;
	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

	txdl_per_memblock = config->memblock_size / txdl_size;

	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
					VXGE_HW_CHANNEL_TYPE_FIFO,
					config->fifo_blocks * txdl_per_memblock,
					attr->per_txdl_space, attr->userdata);

	if (fifo == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vpath->fifoh = fifo;
	fifo->nofl_db = vpath->nofl_db;

	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->stats = &vpath->sw_stats->fifo_stats;

	fifo->config = config;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;

	if (fifo->config->intr)
		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * TxDL split into three independent parts:
	 *	- set of TxD's
	 *	- TxD HW private part
	 *	- driver private part
	 *
	 * Adaptive memory allocation used. i.e. Memory allocated on
	 * demand with the size which will fit into one memory block.
	 * One memory block may contain more than one TxDL.
	 *
	 * During "reserve" operations more memory can be allocated on demand
	 * for example due to FIFO full condition.
	 *
	 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
	 * routine which will essentially stop the channel and free resources.
	 */

	/* TxDL common private size == TxDL private + driver private */
	fifo->priv_size =
		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
	fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	if (fifo->txdl_per_memblock == 0) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
		goto exit;
	}

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

	fifo->mempool =
		__vxge_hw_mempool_create(vpath->hldev,
			fifo->config->memblock_size,
			fifo->txdl_size,
			fifo->priv_size,
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			&fifo_mp_callback,
			fifo);

	if (fifo->mempool == NULL) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_channel_initialize(&fifo->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_fifo_delete(vp);
		goto exit;
	}

	vxge_assert(fifo->channel.reserve_ptr);
exit:
	return status;
}
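
/*
 * Worked example (illustrative, assuming a 32-byte struct vxge_hw_fifo_txd):
 * with config->max_frags = 128, txdl_size = 128 * 32 = 4096 bytes, so a
 * 16 KiB memblock yields txdl_per_memblock = 16384 / 4096 = 4 TxDLs, and
 * fifo_blocks = 8 gives a channel length of 8 * 4 = 32 TxDLs.
 */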

/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}

/*
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE if flickering to be on, FALSE to be off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
	struct __vxge_hw_virtualpath *vpath;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (hldev == NULL) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	data0 = on_off;
	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
static enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
			      u32 action, u32 rts_table, u32 offset,
			      u64 *data0, u64 *data1)
{
	enum vxge_hw_status status;
	u64 steer_ctrl = 0;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	if ((rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
	}

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      data0, data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
	    (rts_table !=
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		*data1 = 0;
exit:
	return status;
}

/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
static enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
			      u32 rts_table, u32 offset, u64 steer_data0,
			      u64 steer_data1)
{
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	data0 = steer_data0;

	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		data1 = steer_data1;

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      &data0, &data1, &steer_ctrl);
exit:
	return status;
}

/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
			struct __vxge_hw_vpath_handle *vp,
			enum vxge_hw_rth_algoritms algorithm,
			struct vxge_hw_rth_hash_types *hash_type,
			u16 bucket_size)
{
	u64 data0, data1;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		     0, &data0, &data1);
	if (status != VXGE_HW_OK)
		goto exit;

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

	if (hash_type->hash_type_tcpipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

	if (hash_type->hash_type_ipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

	if (hash_type->hash_type_tcpipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

	if (hash_type->hash_type_ipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

	if (hash_type->hash_type_tcpipv6ex_en)
		data0 |=
		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

	if (hash_type->hash_type_ipv6ex_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
	else
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

	status = __vxge_hw_vpath_rts_table_set(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		0, data0, 0);
exit:
	return status;
}
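
/*
 * Illustrative sketch, not part of the driver: enabling RTH with the Jenkins
 * hash over TCP/IPv4 flows and a 2^8-entry bucket space. The wrapper name is
 * hypothetical.
 */
#if 0	/* example only -- not built */
static enum vxge_hw_status
example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash_type;

	memset(&hash_type, 0, sizeof(hash_type));
	hash_type.hash_type_tcpipv4_en = 1;

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash_type, 8);
}
#endif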

static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
				u16 flag, u8 *itable)
{
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		/* fall through */
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		/* fall through */
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		/* fall through */
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
	default:
		break;
	}
}

/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
			struct __vxge_hw_vpath_handle **vpath_handles,
			u32 vpath_count,
			u8 *mtable,
			u8 *itable,
			u32 itable_size)
{
	u32 i, j, action, rts_table;
	u64 data0;
	u64 data1;
	u32 max_entries;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	max_entries = (((u32)1) << itable_size);

	if (vp->vpath->hldev->config.rth_it_type
				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
				action, rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(
				vpath_handles[mtable[itable[j]]], action,
				rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}
	} else {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
		for (i = 0; i < vpath_count; i++) {

			for (j = 0; j < max_entries;) {

				data0 = 0;
				data1 = 0;

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 1, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 2, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 3, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 4, itable);
					j++;
					break;
				}

				if (data0 != 0) {
					status = __vxge_hw_vpath_rts_table_set(
							vpath_handles[i],
							action, rts_table,
							0, data0, data1);

					if (status != VXGE_HW_OK)
						goto exit;
				}
			}
		}
	}
exit:
	return status;
}

/*
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ringh: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 rxd_new_count, rxd_spat;

	if (ring == NULL)
		return status;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

	if (rxd_new_count >= rxd_spat)
		status = VXGE_HW_FAIL;

	return status;
}

/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
	struct __vxge_hw_device *hldev,
	struct __vxge_hw_virtualpath *vpath)
{
	u32 i, mtu = 0, max_pyld = 0;
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		val64 = readq(&vpath->vpmgmt_reg->
				rxmac_cfg0_port_vpmgmt_clone[i]);
		max_pyld =
			(u32)
			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
			(val64);
		if (mtu < max_pyld)
			mtu = max_pyld;
	}

	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
	else
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

	return status;
}

/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if
 * adapter completed the reset process for the vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}
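
/*
 * Worked example (illustrative): the per-vpath reset flags occupy the upper
 * bits of cmn_rsthdlr_cfg0, so vp_id = 3 maps to 1 << (16 - 3) = 0x2000
 * before VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH() positions it in the
 * register. __vxge_hw_vpath_reset_check() above polls the same
 * 1 << (16 - vp_id) encoding in vpath_rst_in_prog to detect completion.
 */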

/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];

	if (vpath->ringh) {
		status = __vxge_hw_ring_reset(vpath->ringh);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	if (vpath->fifoh)
		status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
		return;

	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	val64 = readq(&vp_reg->prc_cfg7);

	if (vpath->vp_config->ring.scatter_mode !=
		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

		switch (vpath->vp_config->ring.scatter_mode) {
		case VXGE_HW_RING_SCATTER_MODE_A:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
			break;
		case VXGE_HW_RING_SCATTER_MODE_B:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
			break;
		case VXGE_HW_RING_SCATTER_MODE_C:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
			break;
		}
	}

	writeq(val64, &vp_reg->prc_cfg7);

	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
		__vxge_hw_ring_first_block_address_get(
			vpath->ringh) >> 3), &vp_reg->prc_cfg5);

	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
	else
		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
}

/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64+1)/2;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		vpath->max_nofl_db = vpath->max_kdfc_db;

		if (vpath->max_nofl_db <
			((vpath->vp_config->fifo.memblock_size /
			(vpath->vp_config->fifo.max_frags *
			sizeof(struct vxge_hw_fifo_txd))) *
			vpath->vp_config->fifo.fifo_blocks)) {

			return VXGE_HW_BADCFG_FIFO_BLOCKS;
		}
		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db*2)-1);
	}

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
		&vp_reg->kdfc_fifo_trpl_ctrl);

	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
	wmb();

	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}

/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
			vpath->vsport_number), &vp_reg->xmac_vsport_choice);

	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->xmac_rpa_vcfg);

		if (vp_config->rpa_strip_vlan_tag !=
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
			if (vp_config->rpa_strip_vlan_tag)
				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
			else
				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
		}

		writeq(val64, &vp_reg->xmac_rpa_vcfg);
		val64 = readq(&vp_reg->rxmac_vcfg0);

		if (vp_config->mtu !=
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
			if ((vp_config->mtu +
				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vp_config->mtu +
					VXGE_HW_MAC_HEADER_MAX_SIZE);
			else
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vpath->max_mtu);
		}

		writeq(val64, &vp_reg->rxmac_vcfg0);

		val64 = readq(&vp_reg->rxmac_vcfg1);

		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

		if (hldev->config.rth_it_type ==
				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
				0x2) |
				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
		}

		writeq(val64, &vp_reg->rxmac_vcfg1);
	}
	return status;
}
4307
* __vxge_hw_vpath_tim_configure
4308
* This routine configures the tim registers of virtual path using the config
4311
static enum vxge_hw_status
4312
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4315
enum vxge_hw_status status = VXGE_HW_OK;
4316
struct __vxge_hw_virtualpath *vpath;
4317
struct vxge_hw_vpath_reg __iomem *vp_reg;
4318
struct vxge_hw_vp_config *config;
4320
vpath = &hldev->virtual_paths[vp_id];
4321
vp_reg = vpath->vp_reg;
4322
config = vpath->vp_config;
4324
writeq(0, &vp_reg->tim_dest_addr);
4325
writeq(0, &vp_reg->tim_vpath_map);
4326
writeq(0, &vp_reg->tim_bitmap);
4327
writeq(0, &vp_reg->tim_remap);
4329
if (config->ring.enable == VXGE_HW_RING_ENABLE)
4330
writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
4331
(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4332
VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
4334
val64 = readq(&vp_reg->tim_pci_cfg);
4335
val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
4336
writeq(val64, &vp_reg->tim_pci_cfg);
4338
if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4340
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4342
if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4343
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4345
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4346
config->tti.btimer_val);
4349
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4351
if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4352
if (config->tti.timer_ac_en)
4353
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4355
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4358
if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4359
if (config->tti.timer_ci_en)
4360
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4362
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4365
if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4366
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4367
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
				config->tti.urange_a);
		}

		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->tti.urange_b);
		}

		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->tti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		vpath->tim_tti_cfg1_saved = val64;

		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
					config->tti.uec_a);
		}

		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
					config->tti.uec_b);
		}

		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
					config->tti.uec_c);
		}

		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
					config->tti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->tti.rtimer_val);
		}

		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->tti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
		vpath->tim_tti_cfg3_saved = val64;
	}

	if (config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->rti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->rti.urange_a);
		}

		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->rti.urange_b);
		}

		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->rti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
		vpath->tim_rti_cfg1_saved = val64;

		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
					config->rti.uec_a);
		}

		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
					config->rti.uec_b);
		}

		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
					config->rti.uec_c);
		}

		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
					config->rti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->rti.rtimer_val);
		}

		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->rti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
		vpath->tim_rti_cfg3_saved = val64;
	}

	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
	writeq(val64, &vp_reg->tim_wrkld_clc);

	return status;
}
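/*
 * Note on the TIM programming above: each TTI/RTI field is applied only
 * when it differs from VXGE_HW_USE_FLASH_DEFAULT, so values burned into
 * the adapter's flash stay in effect unless the caller overrides them.
 * A hypothetical override sketch (field names assumed from the driver's
 * struct vxge_hw_tim_intr_config; the numbers are purely illustrative,
 * not recommended settings):
 *
 *	config->tti.urange_a = 10;	// utilization range A boundary, %
 *	config->tti.uec_a = 1;		// event count threshold in range A
 *	config->tti.rtimer_val = 0;	// restriction timer value
 */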
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
	if (status == VXGE_HW_OK) {
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}
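/*
 * The PCI config read above fetches the PCIe Device Control register
 * (config-space offset 0x78 on this adapter); bits 14:12 of it encode
 * the negotiated Max_Read_Request_Size. The intent, as far as the code
 * shows, is to seed FB_FILL_THRESH from MRRS so the read-DMA engine's
 * fetches stay matched to what the root complex returns per request.
 */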
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
 */
static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
		goto exit;

	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
		vpath->hldev->tim_int_mask1, vpath->vp_id);
	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

	/* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
	 * work after the interface is brought down.
	 */
	spin_lock(&vpath->lock);
	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
	spin_unlock(&vpath->lock);

	vpath->vpmgmt_reg = NULL;
	vpath->nofl_db = NULL;
	vpath->max_mtu = 0;
	vpath->vsport_number = 0;
	vpath->max_kdfc_db = 0;
	vpath->max_nofl_db = 0;
	vpath->ringh = NULL;
	vpath->fifoh = NULL;
	memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
	vpath->stats_block = NULL;
	vpath->hw_stats = NULL;
	vpath->hw_stats_sav = NULL;
	vpath->sw_stats = NULL;

exit:
	return;
}
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct vxge_hw_vp_config *config)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[vp_id];

	spin_lock_init(&vpath->lock);
	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_config = config;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	INIT_LIST_HEAD(&vpath->vpath_handles);

	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		__vxge_hw_vp_terminate(hldev, vp_id);
exit:
	return status;
}
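/*
 * Taken together, vpath bring-up is two-phase: __vxge_hw_vp_initialize()
 * resets the vpath and prepares the software state, then hands off to
 * __vxge_hw_vpath_initialize() for the actual register programming. Any
 * failure unwinds through __vxge_hw_vp_terminate(), so a caller only
 * ever observes a fully initialized or a fully terminated vpath.
 */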
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. For example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		/* do not program an out-of-range frame length */
		goto exit;
	}

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
	return status;
}
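/*
 * Usage sketch, assuming "handle" came from a successful
 * vxge_hw_vpath_open() (the variable names are illustrative):
 *
 *	if (vxge_hw_vpath_mtu_set(handle, 9000) != VXGE_HW_OK)
 *		return -EINVAL;
 *
 * The range check inside compares new_mtu plus
 * VXGE_HW_MAC_HEADER_MAX_SIZE, i.e. the on-wire frame length, against
 * VXGE_HW_MIN_MTU and the vpath's max_mtu.
 */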
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory
 */
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}
/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}
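/*
 * The pool only recycles buffers of exactly block_size; any other size
 * falls through with entry == NULL and the caller is expected to
 * allocate from the system instead. After a successful pool hit,
 * __vxge_hw_blockpool_blocks_add() replenishes the free list.
 */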
/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
		   struct vxge_hw_vpath_attr *attr,
		   struct __vxge_hw_vpath_handle **vpath_handle)
{
	struct __vxge_hw_virtualpath *vpath;
	struct __vxge_hw_vpath_handle *vp;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[attr->vp_id];

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
			&hldev->config.vp_config[attr->vp_id]);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
	if (vp == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit2;
	}

	vp->vpath = vpath;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit6;
	}

	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit7;

		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
	}

	vpath->fifoh->tx_intr_num =
		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_TX;

	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
				VXGE_HW_BLOCK_SIZE);
	if (vpath->stats_block == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit8;
	}

	vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
			stats_block->memblock;
	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
						vpath->hw_stats;

	vpath->hw_stats_sav =
		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit8;

	list_add(&vp->item, &vpath->vpath_handles);

	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

	*vpath_handle = vp;

	attr->fifo_attr.userdata = vpath->fifoh;
	attr->ring_attr.userdata = vpath->ringh;

	return VXGE_HW_OK;

vpath_open_exit8:
	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);
vpath_open_exit7:
	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);
vpath_open_exit6:
	vfree(vp);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:

	return status;
}
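/*
 * Typical pairing with vxge_hw_vpath_close(), assuming hldev and attr
 * were prepared earlier in the device initialization path (the names
 * are illustrative):
 *
 *	struct __vxge_hw_vpath_handle *handle;
 *
 *	if (vxge_hw_vpath_open(hldev, &attr, &handle) == VXGE_HW_OK) {
 *		// fifo/ring handles come back through
 *		// attr.fifo_attr.userdata and attr.ring_attr.userdata
 *		vxge_hw_vpath_close(handle);
 *	}
 */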
/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
 * @vp: Handle got from previous vpath open
 *
 * This function publishes the number of RxDs initially available to the
 * ring controller of the vpath and derives the ring's replenish
 * threshold from it.
 */
void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct __vxge_hw_ring *ring = vpath->ringh;
	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
	u64 new_count, val64, val164;

	if (vdev->titan1) {
		new_count = readq(&vpath->vp_reg->rxdmem_size);
		new_count &= 0x1fff;
	} else
		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;

	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val164 /= 2;
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
	val64 &= 0x1ff;

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}
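/*
 * The arithmetic above works in qwords: new_count is the vpath's RxD
 * memory size, and the doorbell write publishes all of it to the ring
 * controller. The replenish threshold is then the smaller of half that
 * figure and what remains above the RXD_SPAT watermark, divided by 4
 * because each RxD occupies four qwords. The floor of 4 simply keeps
 * rxds_limit sane for very small rings.
 */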
/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			       struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}
/*
 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
 * This function is used to close access to virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);

vpath_close_exit:
	return status;
}
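/*
 * A vpath can be referenced by several handles; the close above only
 * tears the vpath down when the handle being closed is the last one on
 * vpath->vpath_handles. Otherwise the handle is re-queued and the call
 * fails with VXGE_HW_FAIL, leaving the vpath running.
 */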
/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}
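/*
 * vxge_hw_vpath_reset() only requests the reset; it does not wait for it
 * to complete. Callers follow up with vxge_hw_vpath_recover_from_reset()
 * below, which polls for completion and reprograms the vpath.
 */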
/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

exit:
	return status;
}
/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}