/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	/* Clear any latched alarm/error bits before unmasking */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
		&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed the last in
 * vpath de-initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}
void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}
void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
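
/*
 * Note on the conversion in the two helpers above: rtimer is kept in
 * microseconds, while the TIM RTIMER_VAL field counts in units of
 * roughly 272 ns, hence (rtimer * 1000) / 272. For example, rtimer =
 * 100 us yields (100 * 1000) / 272 = 367 hardware timer units (integer
 * division).
 */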
/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}
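
/*
 * The set/clear MSIX mask registers appear to be laid out as four 32-bit
 * vectors, so a vector id splits as (msix_id % 4) to pick the register
 * and (msix_id >> 2) to pick the bit within it. For example, msix_id = 5
 * selects set_msix_mask_vect[1] and bit vxge_mBIT(1).
 */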
/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function clears (acknowledges) the msix interrupt for the given
 * msix_id if configured in MSIX one-shot mode
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}
/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}
/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks->crit_err)
		hldev->uld_callbacks->crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}
/**
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link state is already down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks->link_down)
		hldev->uld_callbacks->link_down(hldev);
exit:
	return VXGE_HW_OK;
}
/**
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link state is already up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks->link_up)
		hldev->uld_callbacks->link_up(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
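
/*
 * Note on the return mapping above: an SERR event outranks everything
 * and is reported as VXGE_HW_ERR_CRITICAL, then slot-freeze, then fifo
 * errors; any remaining uncleared alarm degrades to VXGE_HW_ERR_VPATH.
 */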
/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions: it first checks whether the interrupt
 * was raised by the device (relevant on a shared IRQ line), and then it
 * masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64bit general adapter
 * status.
 * See also: vxge_hw_device_mask_all()
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}
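
/*
 * A minimal sketch of how an INTA-mode interrupt handler is expected to
 * use the two calls above (the real handler in vxge-main.c also wires up
 * NAPI and per-vpath processing; my_isr is a hypothetical name):
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *			return IRQ_NONE;
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		return IRQ_HANDLED;
 *	}
 */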
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by having free and reserved
	 * arrays separated we basically separated irq and non-irq parts.
	 * i.e. no additional lock needs to be taken when we free a resource */

	if (channel->length - channel->free_ptr > 0) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
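
/*
 * Worked example of the swap above: with length = 4, suppose all four
 * dtrs have been allocated (reserve_ptr == reserve_top) and later freed
 * (free_ptr == 0, free_arr full). The next alloc swaps the arrays and
 * sets reserve_ptr = 4, reserve_top = 0, free_ptr = 4, after which
 * allocation proceeds from the refilled reserve array.
 */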
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}
/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve number of DTRs available. This function can not be called
 * from data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
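
/*
 * For example, with length = 8, reserve_ptr = 5, reserve_top = 2 and
 * free_ptr = 6, the count is (5 - 2) + (8 - 6) = 5: three dtrs still in
 * the reserve array plus two already returned to the free array but not
 * yet swapped back.
 */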
/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
	void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}
/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
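
/*
 * The usual receive-path sequence, as a sketch (buffer attachment, e.g.
 * via vxge_hw_ring_rxd_1b_set(), omitted):
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		... attach a dma-mapped data buffer to rxdh ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */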
/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}
/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = *rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
			0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *	"transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), drop it !!!
	 */

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: Handle to the fifo object
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}
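
/*
 * The first doorbell write above packs three fields into control_0: the
 * NODBW message type, the index of the last TxD in the list (num_txds,
 * where 0 means one descriptor) and the no-snoop bits; the second write
 * hands the host address of the TxDL to the adapter. This is why
 * vxge_hw_fifo_txdl_post() below passes txdl_priv->frags - 1 as num_txds.
 */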
/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 * @frags: Number of contiguous buffers that are part of a single
 *         transmit operation.
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = *txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	/* t_code is masked to 3 bits, so only values above 0x4 are invalid */
	if ((t_code & 0x7) > 0x4) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
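
/*
 * The loop above packs the six address bytes big-endian into the low 48
 * bits of data1 (and likewise the mask into data2). For example, the
 * address 00:01:02:03:04:05 becomes data1 = 0x0000000102030405, which
 * VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR() then positions in the
 * steering register.
 */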
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 * vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate a given MSIX vector numbers with the four TIM
 * interrupts and alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
2286
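/*
 * MSI-X vector numbering, as inferred from the code above and below:
 * each vpath is allotted a contiguous block of four vectors, so an
 * absolute vector number is (vp_id * 4) plus the per-vpath index.
 * The mask/clear/unmask helpers below invert that mapping:
 * msix_id >> 2 recovers the vpath bit for vxge_mBIT(), while
 * msix_id % 4 selects the per-vpath slot in the register array.
 */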
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
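/*
 * One-shot note: when the device runs in VXGE_HW_INTR_MODE_MSIX_ONE_SHOT,
 * clearing a vector goes through clr_msix_one_shot_vec[] rather than the
 * ordinary clear_msix_mask_vect[] register, presumably because one-shot
 * vectors auto-mask when they fire and need an explicit re-arm.
 */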
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
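/*
 * The two INTA helpers below operate on the device-wide TIM interrupt
 * mask registers.  VXGE_HW_DEVICE_TIM_INT_MASK_SET() fills the per-vpath
 * Tx/Rx bits into the tim_int_mask0 (64-bit) and tim_int_mask1 (32-bit,
 * written through the upper-half PIO helper) arrays, indexed by
 * VXGE_HW_VPATH_INTR_TX/RX.  Masking ORs those bits into the global
 * registers; unmasking clears them by ANDing with their complement.
 */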
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	/* Re-read the 32-bit mask before clearing bits in it,
	 * mirroring the mask path above. */
	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successful.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
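/*
 * Doorbell arithmetic in vxge_hw_vpath_poll_rx() above: every RxD takes
 * 4 qwords, and each fully replenished RxD block accounts for 4 extra
 * qwords, so new_count = doorbell_cnt * 4 (+4 per completed block) is
 * the qword count posted to prc_rxd_doorbell.  The trailing readl() of
 * titan_general_int_status serves to flush the posted doorbell write.
 */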
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Array in which the callback returns completed skbs
 * @nr_skb: Number of slots available in @skb_ptr
 * @more: Set by the callback when more completions remain
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successful.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
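/*
 * In vxge_hw_vpath_poll_tx() above, a non-VXGE_HW_OK return from the
 * fifo completion callback is folded into VXGE_HW_COMPLETIONS_REMAIN,
 * telling the caller that completed TxDLs are still outstanding; the
 * skb_ptr/nr_skb/more arguments let the callback hand completed skbs
 * back to the caller in batches.
 */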