~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise-security


Viewing changes to drivers/net/vxge/vxge-config.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati
  • Date: 2011-12-06 15:56:07 UTC
  • Revision ID: package-import@ubuntu.com-20111206155607-pcf44kv5fmhk564f
Tags: 3.2.0-1401.1
[ Paolo Pisati ]

* Rebased on top of Ubuntu-3.2.0-3.8
* Tilt-tracking @ ef2487af4bb15bdd0689631774b5a5e3a59f74e2
* Delete debian.ti-omap4/control, it shouldn't be tracked
* Fix architecture spelling (s/armel/armhf/)
* [Config] Update configs following 3.2 import
* [Config] Fix compilation: disable CODA and ARCH_OMAP3
* [Config] Fix compilation: disable Ethernet Faraday
* Update series to precise

/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {                          \
        status = __vxge_hw_vpath_stats_access(vpath,                    \
                                              VXGE_HW_STATS_OP_READ,    \
                                              offset,                   \
                                              &val64);                  \
        if (status != VXGE_HW_OK)                                       \
                return status;                                          \
}
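
/*
 * vxge_hw_vpath_set_zero_rx_frm_len - Clear the RTS_MAX_FRM_LEN field in
 * rxmac_vcfg0 for this vpath. The trailing readq reads the register back,
 * flushing the posted write.
 */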
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
        u64 val64;

        val64 = readq(&vp_reg->rxmac_vcfg0);
        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        writeq(val64, &vp_reg->rxmac_vcfg0);
        val64 = readq(&vp_reg->rxmac_vcfg0);
}

/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        struct __vxge_hw_virtualpath *vpath;
        u64 val64, rxd_count, rxd_spat;
        int count = 0, total_count = 0;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;

        vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

        /* Check that the ring controller for this vpath has enough free RxDs
         * to send frames to the host.  This is done by reading the
         * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
         * RXD_SPAT value for the vpath.
         */
        val64 = readq(&vp_reg->prc_cfg6);
        rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
        /* Use a factor of 2 when comparing rxd_count against rxd_spat to
         * allow some legroom.
         */
        rxd_spat *= 2;

        do {
                mdelay(1);

                rxd_count = readq(&vp_reg->prc_rxd_doorbell);

                /* Check that the ring controller for this vpath does
                 * not have any frame in its pipeline.
                 */
                val64 = readq(&vp_reg->frm_in_progress_cnt);
                if ((rxd_count <= rxd_spat) || (val64 > 0))
                        count = 0;
                else
                        count++;
                total_count++;
        } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
                        (total_count < VXGE_HW_MAX_POLLING_COUNT));

        if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
                        __func__);

        return total_count;
}

/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
        int i, total_count = 0;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
                if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                        break;
        }
}

/*
 * __vxge_hw_device_register_poll
 * Will poll a certain register for up to the specified amount of time,
 * until the masked bits are cleared.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}
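
/*
 * __vxge_hw_pio_mem_write64 - Write a 64-bit value as two barrier-separated
 * 32-bit PIO writes (lower half first), then poll @addr until the bits in
 * @mask are cleared.
 */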
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();
        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
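
/*
 * vxge_hw_vpath_fw_api - Issue a firmware API command through the RTS
 * access steer registers. @data0, @data1 and @steer_ctrl carry the command
 * arguments in and, on success, the firmware's results out. The vpath lock
 * is held across the call only while the vpath is open.
 */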
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
                     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
                     u64 *steer_ctrl)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
        enum vxge_hw_status status;
        u64 val64;
        u32 retry = 0, max_retry = 3;

        spin_lock(&vpath->lock);
        if (!vpath->vp_open) {
                spin_unlock(&vpath->lock);
                max_retry = 100;
        }

        writeq(*data0, &vp_reg->rts_access_steer_data0);
        writeq(*data1, &vp_reg->rts_access_steer_data1);
        wmb();

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                *steer_ctrl;

        status = __vxge_hw_pio_mem_write64(val64,
                                           &vp_reg->rts_access_steer_ctrl,
                                           VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                                           VXGE_HW_DEF_DEVICE_POLL_MILLIS);

        /* The __vxge_hw_device_register_poll can udelay for a significant
         * amount of time, blocking other processes from the CPU.  If it
         * delays for ~5secs, an NMI error can occur.  A way around this is to
         * give up the processor via msleep, but this is not allowed while
         * under lock.  So, only allow it to sleep for ~4secs if open.
         * Otherwise, delay for 1sec and sleep for 10ms until the firmware
         * operation has completed or timed-out.
         */
        while ((status != VXGE_HW_OK) && retry++ < max_retry) {
                if (!vpath->vp_open)
                        msleep(20);
                status = __vxge_hw_device_register_poll(
                                        &vp_reg->rts_access_steer_ctrl,
                                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        }

        if (status != VXGE_HW_OK)
                goto out;

        val64 = readq(&vp_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                *data0 = readq(&vp_reg->rts_access_steer_data0);
                *data1 = readq(&vp_reg->rts_access_steer_data1);
                *steer_ctrl = val64;
        } else
                status = VXGE_HW_FAIL;

out:
        if (vpath->vp_open)
                spin_unlock(&vpath->lock);
        return status;
}
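
/*
 * vxge_hw_upgrade_read_version - Read the major, minor and build numbers
 * of the firmware image staged for upgrade.
 */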
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
                             u32 *minor, u32 *build)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_READ,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
        *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
        *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

        return status;
}
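
/*
 * vxge_hw_flash_fw - Commit the previously transferred firmware image to
 * flash, checking the action code the firmware returns.
 */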
enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        u32 ret;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
                goto exit;
        }

        ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
        if (ret != 1) {
                vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
                                __func__, ret);
                status = VXGE_HW_FAIL;
        }

exit:
        return status;
}
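
/*
 * vxge_update_fw_image - Transfer a firmware image to the adapter: send the
 * upgrade start command, then stream @fwdata in VXGE_HW_FW_UPGRADE_BLK_SIZE
 * (16-byte) chunks, honouring the skip/done/error codes the firmware
 * returns after each block.
 */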
enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        int ret_code, sec_code;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        /* send upgrade start command */
        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_START,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
                                __func__);
                return status;
        }

        /* Transfer fw image to adapter 16 bytes at a time */
        for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
                steer_ctrl = 0;

                /* The next 128bits of fwdata to be loaded onto the adapter */
                data0 = *((u64 *)fwdata);
                data1 = *((u64 *)fwdata + 1);

                status = vxge_hw_vpath_fw_api(vpath,
                                              VXGE_HW_FW_UPGRADE_ACTION,
                                              VXGE_HW_FW_UPGRADE_MEMO,
                                              VXGE_HW_FW_UPGRADE_OFFSET_SEND,
                                              &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
                                        __func__);
                        goto out;
                }

                ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
                switch (ret_code) {
                case VXGE_HW_FW_UPGRADE_OK:
                        /* All OK, send next 16 bytes. */
                        break;
                case VXGE_FW_UPGRADE_BYTES2SKIP:
                        /* skip bytes in the stream */
                        fwdata += (data0 >> 8) & 0xFFFFFFFF;
                        break;
                case VXGE_HW_FW_UPGRADE_DONE:
                        goto out;
                case VXGE_HW_FW_UPGRADE_ERR:
                        sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
                        switch (sec_code) {
                        case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
                        case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
                                printk(KERN_ERR
                                       "corrupted data from .ncf file\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
                                printk(KERN_ERR "invalid .ncf file\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
                                printk(KERN_ERR "buffer overflow\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
                                printk(KERN_ERR "failed to flash the image\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
                                printk(KERN_ERR
                                       "generic error. Unknown error type\n");
                                break;
                        default:
                                printk(KERN_ERR "Unknown error of type %d\n",
                                       sec_code);
                                break;
                        }
                        status = VXGE_HW_FAIL;
                        goto out;
                default:
                        printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
                        status = VXGE_HW_FAIL;
                        goto out;
                }
                /* point to next 16 bytes */
                fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
        }
out:
        return status;
}
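
/*
 * vxge_hw_vpath_eprom_img_ver_get - Query validity, index, type and
 * revision of each of the VXGE_HW_MAX_ROM_IMAGES expansion ROM images.
 */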
enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
                                struct eprom_image *img)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        int i;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
                data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
                data1 = steer_ctrl = 0;

                status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_FW_API_GET_EPROM_REV,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        break;

                img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
                img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
                img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
                img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
        }

        return status;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response bit and SERR in the PCI command register. */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}

/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine waits until the vpath reset in progress register reads zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;
        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = readq(&legacy_reg->toc_swapper_fb);

        wmb();

        switch (val64) {
        case VXGE_HW_SWAPPER_INITIAL_VALUE:
                return status;

        case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_wr_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_wr_flip_en);
                break;

        case VXGE_HW_SWAPPER_BYTE_SWAPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_wr_swap_en);
                break;

        case VXGE_HW_SWAPPER_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_wr_flip_en);
                break;
        }

        wmb();

        val64 = readq(&legacy_reg->toc_swapper_fb);

        if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
                status = VXGE_HW_ERR_SWAPPER_CTRL;

        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric has
 * completed initializing the registers.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
        (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks whether the device function is privileged or not
 */

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                func_id) &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
        u64 val64;

        val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

        return
         (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
                hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

                hldev->first_vp_id = i;
                break;
        }
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

exit:
        return status;
}

/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
                           struct vxge_hw_device_hw_info *hw_info)
{
        struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
        struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
        struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
        struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
        u64 data0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                goto exit;

        fw_date->day =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
        fw_date->month =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
        fw_date->year =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

        snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
                 fw_date->month, fw_date->day, fw_date->year);

        fw_version->major =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
        fw_version->minor =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
        fw_version->build =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

        snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                 fw_version->major, fw_version->minor, fw_version->build);

        flash_date->day =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
        flash_date->month =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
        flash_date->year =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

        snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
                 flash_date->month, flash_date->day, flash_date->year);

        flash_version->major =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
        flash_version->minor =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
        flash_version->build =
            (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

        snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                 flash_version->major, flash_version->minor,
                 flash_version->build);

exit:
        return status;
}

/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
                              struct vxge_hw_device_hw_info *hw_info)
{
        enum vxge_hw_status status;
        u64 data0, data1 = 0, steer_ctrl = 0;
        u8 *serial_number = hw_info->serial_number;
        u8 *part_number = hw_info->part_number;
        u8 *product_desc = hw_info->product_desc;
        u32 i, j = 0;

        data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        ((u64 *)serial_number)[0] = be64_to_cpu(data0);
        ((u64 *)serial_number)[1] = be64_to_cpu(data1);

        data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
        data1 = steer_ctrl = 0;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        ((u64 *)part_number)[0] = be64_to_cpu(data0);
        ((u64 *)part_number)[1] = be64_to_cpu(data1);

        for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
             i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
                data0 = i;
                data1 = steer_ctrl = 0;

                status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        return status;

                ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
                ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
        }

        return status;
}

/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_device_hw_info *hw_info)
{
        u64 data0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        data0 = 0;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_FW_API_GET_FUNC_MODE,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
        return status;
}

/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *               from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
                         u8 *macaddr, u8 *macaddr_mask)
{
        u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
            data0 = 0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;
        int i;

        do {
                status = vxge_hw_vpath_fw_api(vpath, action,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        goto exit;

                data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
                data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
                                                                        data1);

                for (i = ETH_ALEN; i > 0; i--) {
                        macaddr[i - 1] = (u8) (data0 & 0xFF);
                        data0 >>= 8;

                        macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
                        data1 >>= 8;
                }

                action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
                data0 = 0, data1 = 0, steer_ctrl = 0;

        } while (!is_valid_ether_addr(macaddr));
exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;
        struct __vxge_hw_virtualpath vpath;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                spin_lock_init(&vpath.lock);
                vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
                               (bar0 + val64);
                vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

                status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
                               (bar0 + val64);
                vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

                status = __vxge_hw_vpath_addr_get(&vpath,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
        struct __vxge_hw_device *hldev;
        struct list_head *p, *n;
        u16 ret;

        if (blockpool == NULL) {
                ret = 1;
                goto exit;
        }

        hldev = blockpool->hldev;

        list_for_each_safe(p, n, &blockpool->free_block_list) {
                pci_unmap_single(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
                        ((struct __vxge_hw_blockpool_entry *)p)->length,
                        PCI_DMA_BIDIRECTIONAL);

                vxge_os_dma_free(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->memblock,
                        &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
                kfree(p);
                blockpool->pool_size--;
        }

        list_for_each_safe(p, n, &blockpool->free_entry_list) {
                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
                kfree((void *)p);
        }
        ret = 0;
exit:
        return;
}

/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                           struct __vxge_hw_blockpool *blockpool,
                           u32 pool_size,
                           u32 pool_max)
{
        u32 i;
        struct __vxge_hw_blockpool_entry *entry = NULL;
        void *memblock;
        dma_addr_t dma_addr;
        struct pci_dev *dma_handle;
        struct pci_dev *acc_handle;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (blockpool == NULL) {
                status = VXGE_HW_FAIL;
                goto blockpool_create_exit;
        }

        blockpool->hldev = hldev;
        blockpool->block_size = VXGE_HW_BLOCK_SIZE;
        blockpool->pool_size = 0;
        blockpool->pool_max = pool_max;
        blockpool->req_out = 0;

        INIT_LIST_HEAD(&blockpool->free_block_list);
        INIT_LIST_HEAD(&blockpool->free_entry_list);

        for (i = 0; i < pool_size + pool_max; i++) {
                entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
                                GFP_KERNEL);
                if (entry == NULL) {
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto blockpool_create_exit;
                }
                list_add(&entry->item, &blockpool->free_entry_list);
        }

        for (i = 0; i < pool_size; i++) {
                memblock = vxge_os_dma_malloc(
                                hldev->pdev,
                                VXGE_HW_BLOCK_SIZE,
                                &dma_handle,
                                &acc_handle);
                if (memblock == NULL) {
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto blockpool_create_exit;
                }

                dma_addr = pci_map_single(hldev->pdev, memblock,
                                VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (unlikely(pci_dma_mapping_error(hldev->pdev,
                                dma_addr))) {
                        vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto blockpool_create_exit;
                }

                if (!list_empty(&blockpool->free_entry_list))
                        entry = (struct __vxge_hw_blockpool_entry *)
                                list_first_entry(&blockpool->free_entry_list,
                                        struct __vxge_hw_blockpool_entry,
                                        item);

                if (entry == NULL)
                        entry =
                            kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
                                        GFP_KERNEL);
                if (entry != NULL) {
                        list_del(&entry->item);
                        entry->length = VXGE_HW_BLOCK_SIZE;
                        entry->memblock = memblock;
                        entry->dma_addr = dma_addr;
                        entry->acc_handle = acc_handle;
                        entry->dma_handle = dma_handle;
                        list_add(&entry->item,
                                          &blockpool->free_block_list);
                        blockpool->pool_size++;
                } else {
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto blockpool_create_exit;
                }
        }

blockpool_create_exit:
        return status;
}

/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
        if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
            (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
                return VXGE_HW_BADCFG_FIFO_BLOCKS;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
        enum vxge_hw_status status;

        if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
            (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
                return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

        status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
        if (status != VXGE_HW_OK)
                return status;

        if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
                ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
                (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
                return VXGE_HW_BADCFG_VPATH_MTU;

        if ((vp_config->rpa_strip_vlan_tag !=
                VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
                (vp_config->rpa_strip_vlan_tag !=
                VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
                (vp_config->rpa_strip_vlan_tag !=
                VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
                return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
        u32 i;
        enum vxge_hw_status status;

        if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
                return VXGE_HW_BADCFG_INTR_MODE;

        if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
                return VXGE_HW_BADCFG_RTS_MAC_EN;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                status = __vxge_hw_device_vpath_config_check(
                                &new_config->vp_config[i]);
                if (status != VXGE_HW_OK)
                        return status;
        }

        return VXGE_HW_OK;
}
1316
 
 
1317
 
/*
1318
 
 * vxge_hw_device_initialize - Initialize Titan device.
1319
 
 * Initialize Titan device. Note that all the arguments of this public API
1320
 
 * are 'IN', including @hldev. Driver cooperates with
1321
 
 * OS to find new Titan device, locate its PCI and memory spaces.
1322
 
 *
1323
 
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
1324
 
 * to enable the latter to perform Titan hardware initialization.
1325
 
 */
1326
 
enum vxge_hw_status __devinit
1327
 
vxge_hw_device_initialize(
1328
 
        struct __vxge_hw_device **devh,
1329
 
        struct vxge_hw_device_attr *attr,
1330
 
        struct vxge_hw_device_config *device_config)
1331
 
{
1332
 
        u32 i;
1333
 
        u32 nblocks = 0;
1334
 
        struct __vxge_hw_device *hldev = NULL;
1335
 
        enum vxge_hw_status status = VXGE_HW_OK;
1336
 
 
1337
 
        status = __vxge_hw_device_config_check(device_config);
1338
 
        if (status != VXGE_HW_OK)
1339
 
                goto exit;
1340
 
 
1341
 
        hldev = vzalloc(sizeof(struct __vxge_hw_device));
1342
 
        if (hldev == NULL) {
1343
 
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
1344
 
                goto exit;
1345
 
        }
1346
 
 
1347
 
        hldev->magic = VXGE_HW_DEVICE_MAGIC;
1348
 
 
1349
 
        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1350
 
 
1351
 
        /* apply config */
1352
 
        memcpy(&hldev->config, device_config,
1353
 
                sizeof(struct vxge_hw_device_config));
1354
 
 
1355
 
        hldev->bar0 = attr->bar0;
1356
 
        hldev->pdev = attr->pdev;
1357
 
 
1358
 
        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1359
 
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1360
 
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1361
 
 
1362
 
        __vxge_hw_device_pci_e_init(hldev);
1363
 
 
1364
 
        status = __vxge_hw_device_reg_addr_get(hldev);
1365
 
        if (status != VXGE_HW_OK) {
1366
 
                vfree(hldev);
1367
 
                goto exit;
1368
 
        }
1369
 
 
1370
 
        __vxge_hw_device_host_info_get(hldev);
1371
 
 
1372
 
        /* Incrementing for stats blocks */
1373
 
        nblocks++;
1374
 
 
1375
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1376
 
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1377
 
                        continue;
1378
 
 
1379
 
                if (device_config->vp_config[i].ring.enable ==
1380
 
                        VXGE_HW_RING_ENABLE)
1381
 
                        nblocks += device_config->vp_config[i].ring.ring_blocks;
1382
 
 
1383
 
                if (device_config->vp_config[i].fifo.enable ==
1384
 
                        VXGE_HW_FIFO_ENABLE)
1385
 
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1386
 
                nblocks++;
1387
 
        }
1388
 
 
1389
 
        if (__vxge_hw_blockpool_create(hldev,
1390
 
                &hldev->block_pool,
1391
 
                device_config->dma_blockpool_initial + nblocks,
1392
 
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1393
 
 
1394
 
                vxge_hw_device_terminate(hldev);
1395
 
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
1396
 
                goto exit;
1397
 
        }
1398
 
 
1399
 
        status = __vxge_hw_device_initialize(hldev);
1400
 
        if (status != VXGE_HW_OK) {
1401
 
                vxge_hw_device_terminate(hldev);
1402
 
                goto exit;
1403
 
        }
1404
 
 
1405
 
        *devh = hldev;
1406
 
exit:
1407
 
        return status;
1408
 
}
1409
 
 
1410
 
/*
1411
 
 * vxge_hw_device_terminate - Terminate Titan device.
1412
 
 * Terminate HW device.
1413
 
 */
1414
 
void
1415
 
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1416
 
{
1417
 
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1418
 
 
1419
 
        hldev->magic = VXGE_HW_DEVICE_DEAD;
1420
 
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
1421
 
        vfree(hldev);
1422
 
}
1423
 
 
1424
 
/*
1425
 
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1426
 
 *                           and offset and perform an operation
1427
 
 */
1428
 
static enum vxge_hw_status
1429
 
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1430
 
                             u32 operation, u32 offset, u64 *stat)
1431
 
{
1432
 
        u64 val64;
1433
 
        enum vxge_hw_status status = VXGE_HW_OK;
1434
 
        struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
 
 
1436
 
        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1437
 
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1438
 
                goto vpath_stats_access_exit;
1439
 
        }
1440
 
 
1441
 
        vp_reg = vpath->vp_reg;
1442
 
 
1443
 
        val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1444
 
                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1445
 
                 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1446
 
 
1447
 
        status = __vxge_hw_pio_mem_write64(val64,
1448
 
                                &vp_reg->xmac_stats_access_cmd,
1449
 
                                VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1450
 
                                vpath->hldev->config.device_poll_millis);
1451
 
        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1452
 
                *stat = readq(&vp_reg->xmac_stats_access_data);
1453
 
        else
1454
 
                *stat = 0;
1455
 
 
1456
 
vpath_stats_access_exit:
1457
 
        return status;
1458
 
}
1459
 
 
1460
 
/*
1461
 
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1462
 
 */
1463
 
static enum vxge_hw_status
1464
 
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1465
 
                        struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1466
 
{
1467
 
        u64 *val64;
1468
 
        int i;
1469
 
        u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1470
 
        enum vxge_hw_status status = VXGE_HW_OK;
1471
 
 
1472
 
        val64 = (u64 *)vpath_tx_stats;
1473
 
 
1474
 
        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1475
 
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1476
 
                goto exit;
1477
 
        }
1478
 
 
1479
 
        for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1480
 
                status = __vxge_hw_vpath_stats_access(vpath,
1481
 
                                        VXGE_HW_STATS_OP_READ,
1482
 
                                        offset, val64);
1483
 
                if (status != VXGE_HW_OK)
1484
 
                        goto exit;
1485
 
                offset++;
1486
 
                val64++;
1487
 
        }
1488
 
exit:
1489
 
        return status;
1490
 
}
1491
 
 
1492
 
/*
1493
 
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1494
 
 */
1495
 
static enum vxge_hw_status
1496
 
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1497
 
                        struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1498
 
{
1499
 
        u64 *val64;
1500
 
        enum vxge_hw_status status = VXGE_HW_OK;
1501
 
        int i;
1502
 
        u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1503
 
        val64 = (u64 *) vpath_rx_stats;
1504
 
 
1505
 
        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1506
 
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1507
 
                goto exit;
1508
 
        }
1509
 
        for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1510
 
                status = __vxge_hw_vpath_stats_access(vpath,
1511
 
                                        VXGE_HW_STATS_OP_READ,
1512
 
                                        offset >> 3, val64);
1513
 
                if (status != VXGE_HW_OK)
1514
 
                        goto exit;
1515
 
 
1516
 
                offset += 8;
1517
 
                val64++;
1518
 
        }
1519
 
exit:
1520
 
        return status;
1521
 
}
1522
 
 
1523
 
/*
1524
 
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1525
 
 */
1526
 
static enum vxge_hw_status
1527
 
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1528
 
                          struct vxge_hw_vpath_stats_hw_info *hw_stats)
1529
 
{
1530
 
        u64 val64;
1531
 
        enum vxge_hw_status status = VXGE_HW_OK;
1532
 
        struct vxge_hw_vpath_reg __iomem *vp_reg;
1533
 
 
1534
 
        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1535
 
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1536
 
                goto exit;
1537
 
        }
1538
 
        vp_reg = vpath->vp_reg;
1539
 
 
1540
 
        val64 = readq(&vp_reg->vpath_debug_stats0);
1541
 
        hw_stats->ini_num_mwr_sent =
1542
 
                (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1543
 
 
1544
 
        val64 = readq(&vp_reg->vpath_debug_stats1);
1545
 
        hw_stats->ini_num_mrd_sent =
1546
 
                (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1547
 
 
1548
 
        val64 = readq(&vp_reg->vpath_debug_stats2);
1549
 
        hw_stats->ini_num_cpl_rcvd =
1550
 
                (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1551
 
 
1552
 
        val64 = readq(&vp_reg->vpath_debug_stats3);
1553
 
        hw_stats->ini_num_mwr_byte_sent =
1554
 
                VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1555
 
 
1556
 
        val64 = readq(&vp_reg->vpath_debug_stats4);
1557
 
        hw_stats->ini_num_cpl_byte_rcvd =
1558
 
                VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1559
 
 
1560
 
        val64 = readq(&vp_reg->vpath_debug_stats5);
1561
 
        hw_stats->wrcrdtarb_xoff =
1562
 
                (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1563
 
 
1564
 
        val64 = readq(&vp_reg->vpath_debug_stats6);
1565
 
        hw_stats->rdcrdtarb_xoff =
1566
 
                (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1567
 
 
1568
 
        val64 = readq(&vp_reg->vpath_genstats_count01);
1569
 
        hw_stats->vpath_genstats_count0 =
1570
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1571
 
                val64);
1572
 
 
1573
 
        val64 = readq(&vp_reg->vpath_genstats_count01);
1574
 
        hw_stats->vpath_genstats_count1 =
1575
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1576
 
                val64);
1577
 
 
1578
 
        val64 = readq(&vp_reg->vpath_genstats_count23);
1579
 
        hw_stats->vpath_genstats_count2 =
1580
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1581
 
                val64);
1582
 
 
1583
 
        val64 = readq(&vp_reg->vpath_genstats_count01);
1584
 
        hw_stats->vpath_genstats_count3 =
1585
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1586
 
                val64);
1587
 
 
1588
 
        val64 = readq(&vp_reg->vpath_genstats_count4);
1589
 
        hw_stats->vpath_genstats_count4 =
1590
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1591
 
                val64);
1592
 
 
1593
 
        val64 = readq(&vp_reg->vpath_genstats_count5);
1594
 
        hw_stats->vpath_genstats_count5 =
1595
 
        (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1596
 
                val64);
1597
 
 
1598
 
        status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1599
 
        if (status != VXGE_HW_OK)
1600
 
                goto exit;
1601
 
 
1602
 
        status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1603
 
        if (status != VXGE_HW_OK)
1604
 
                goto exit;
1605
 
 
1606
 
        VXGE_HW_VPATH_STATS_PIO_READ(
1607
 
                VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1608
 
 
1609
 
        hw_stats->prog_event_vnum0 =
1610
 
                        (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1611
 
 
1612
 
        hw_stats->prog_event_vnum1 =
1613
 
                        (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1614
 
 
1615
 
        VXGE_HW_VPATH_STATS_PIO_READ(
1616
 
                VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1617
 
 
1618
 
        hw_stats->prog_event_vnum2 =
1619
 
                        (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1620
 
 
1621
 
        hw_stats->prog_event_vnum3 =
1622
 
                        (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1623
 
 
1624
 
        val64 = readq(&vp_reg->rx_multi_cast_stats);
1625
 
        hw_stats->rx_multi_cast_frame_discard =
1626
 
                (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1627
 
 
1628
 
        val64 = readq(&vp_reg->rx_frm_transferred);
1629
 
        hw_stats->rx_frm_transferred =
1630
 
                (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1631
 
 
1632
 
        val64 = readq(&vp_reg->rxd_returned);
1633
 
        hw_stats->rxd_returned =
1634
 
                (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1635
 
 
1636
 
        val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1637
 
        hw_stats->rx_mpa_len_fail_frms =
1638
 
                (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1639
 
        hw_stats->rx_mpa_mrk_fail_frms =
1640
 
                (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1641
 
        hw_stats->rx_mpa_crc_fail_frms =
1642
 
                (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1643
 
 
1644
 
        val64 = readq(&vp_reg->dbg_stats_rx_fau);
1645
 
        hw_stats->rx_permitted_frms =
1646
 
                (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1647
 
        hw_stats->rx_vp_reset_discarded_frms =
1648
 
        (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1649
 
        hw_stats->rx_wol_frms =
1650
 
                (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1651
 
 
1652
 
        val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1653
 
        hw_stats->tx_vp_reset_discarded_frms =
1654
 
        (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1655
 
                val64);
1656
 
exit:
1657
 
        return status;
1658
 
}
1659
 
 
1660
 
/*
1661
 
 * vxge_hw_device_stats_get - Get the device hw statistics.
1662
 
 * Returns the vpath h/w stats for the device.
1663
 
 */
1664
 
enum vxge_hw_status
1665
 
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1666
 
                        struct vxge_hw_device_stats_hw_info *hw_stats)
1667
 
{
1668
 
        u32 i;
1669
 
        enum vxge_hw_status status = VXGE_HW_OK;
1670
 
 
1671
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1672
 
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1673
 
                        (hldev->virtual_paths[i].vp_open ==
1674
 
                                VXGE_HW_VP_NOT_OPEN))
1675
 
                        continue;
1676
 
 
1677
 
                memcpy(hldev->virtual_paths[i].hw_stats_sav,
1678
 
                                hldev->virtual_paths[i].hw_stats,
1679
 
                                sizeof(struct vxge_hw_vpath_stats_hw_info));
1680
 
 
1681
 
                status = __vxge_hw_vpath_stats_get(
1682
 
                        &hldev->virtual_paths[i],
1683
 
                        hldev->virtual_paths[i].hw_stats);
1684
 
        }
1685
 
 
1686
 
        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1687
 
                        sizeof(struct vxge_hw_device_stats_hw_info));
1688
 
 
1689
 
        return status;
1690
 
}
1691
 
 
1692
 
/*
1693
 
 * vxge_hw_driver_stats_get - Get the device sw statistics.
1694
 
 * Returns the vpath s/w stats for the device.
1695
 
 */
1696
 
enum vxge_hw_status vxge_hw_driver_stats_get(
1697
 
                        struct __vxge_hw_device *hldev,
1698
 
                        struct vxge_hw_device_stats_sw_info *sw_stats)
1699
 
{
1700
 
        enum vxge_hw_status status = VXGE_HW_OK;
1701
 
 
1702
 
        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1703
 
                sizeof(struct vxge_hw_device_stats_sw_info));
1704
 
 
1705
 
        return status;
1706
 
}
1707
 
 
1708
 
/*
1709
 
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1710
 
 *                           and offset and perform an operation
1711
 
 * Get the statistics from the given location and offset.
1712
 
 */
1713
 
enum vxge_hw_status
1714
 
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1715
 
                            u32 operation, u32 location, u32 offset, u64 *stat)
1716
 
{
1717
 
        u64 val64;
1718
 
        enum vxge_hw_status status = VXGE_HW_OK;
1719
 
 
1720
 
        status = __vxge_hw_device_is_privilaged(hldev->host_type,
1721
 
                        hldev->func_id);
1722
 
        if (status != VXGE_HW_OK)
1723
 
                goto exit;
1724
 
 
1725
 
        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1726
 
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1727
 
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1728
 
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1729
 
 
1730
 
        status = __vxge_hw_pio_mem_write64(val64,
1731
 
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1732
 
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1733
 
                                hldev->config.device_poll_millis);
1734
 
 
1735
 
        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1736
 
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1737
 
        else
1738
 
                *stat = 0;
1739
 
exit:
1740
 
        return status;
1741
 
}
1742
 
 
1743
 
/*
1744
 
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1745
 
 * Get the Statistics on aggregate port
1746
 
 */
1747
 
static enum vxge_hw_status
1748
 
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1749
 
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
1750
 
{
1751
 
        u64 *val64;
1752
 
        int i;
1753
 
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1754
 
        enum vxge_hw_status status = VXGE_HW_OK;
1755
 
 
1756
 
        val64 = (u64 *)aggr_stats;
1757
 
 
1758
 
        status = __vxge_hw_device_is_privilaged(hldev->host_type,
1759
 
                        hldev->func_id);
1760
 
        if (status != VXGE_HW_OK)
1761
 
                goto exit;
1762
 
 
1763
 
        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1764
 
                status = vxge_hw_mrpcim_stats_access(hldev,
1765
 
                                        VXGE_HW_STATS_OP_READ,
1766
 
                                        VXGE_HW_STATS_LOC_AGGR,
1767
 
                                        ((offset + (104 * port)) >> 3), val64);
1768
 
                if (status != VXGE_HW_OK)
1769
 
                        goto exit;
1770
 
 
1771
 
                offset += 8;
1772
 
                val64++;
1773
 
        }
1774
 
exit:
1775
 
        return status;
1776
 
}
1777
 
 
1778
 
/*
1779
 
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1780
 
 * Get the Statistics on port
1781
 
 */
1782
 
static enum vxge_hw_status
1783
 
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1784
 
                                   struct vxge_hw_xmac_port_stats *port_stats)
1785
 
{
1786
 
        u64 *val64;
1787
 
        enum vxge_hw_status status = VXGE_HW_OK;
1788
 
        int i;
1789
 
        u32 offset = 0x0;
1790
 
        val64 = (u64 *) port_stats;
1791
 
 
1792
 
        status = __vxge_hw_device_is_privilaged(hldev->host_type,
1793
 
                        hldev->func_id);
1794
 
        if (status != VXGE_HW_OK)
1795
 
                goto exit;
1796
 
 
1797
 
        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1798
 
                status = vxge_hw_mrpcim_stats_access(hldev,
1799
 
                                        VXGE_HW_STATS_OP_READ,
1800
 
                                        VXGE_HW_STATS_LOC_AGGR,
1801
 
                                        ((offset + (608 * port)) >> 3), val64);
1802
 
                if (status != VXGE_HW_OK)
1803
 
                        goto exit;
1804
 
 
1805
 
                offset += 8;
1806
 
                val64++;
1807
 
        }
1808
 
 
1809
 
exit:
1810
 
        return status;
1811
 
}
1812
 
 
1813
 
/*
1814
 
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1815
 
 * Get the XMAC Statistics
1816
 
 */
1817
 
enum vxge_hw_status
1818
 
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1819
 
                              struct vxge_hw_xmac_stats *xmac_stats)
1820
 
{
1821
 
        enum vxge_hw_status status = VXGE_HW_OK;
1822
 
        u32 i;
1823
 
 
1824
 
        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1825
 
                                        0, &xmac_stats->aggr_stats[0]);
1826
 
        if (status != VXGE_HW_OK)
1827
 
                goto exit;
1828
 
 
1829
 
        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1830
 
                                1, &xmac_stats->aggr_stats[1]);
1831
 
        if (status != VXGE_HW_OK)
1832
 
                goto exit;
1833
 
 
1834
 
        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1835
 
 
1836
 
                status = vxge_hw_device_xmac_port_stats_get(hldev,
1837
 
                                        i, &xmac_stats->port_stats[i]);
1838
 
                if (status != VXGE_HW_OK)
1839
 
                        goto exit;
1840
 
        }
1841
 
 
1842
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1843
 
 
1844
 
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1845
 
                        continue;
1846
 
 
1847
 
                status = __vxge_hw_vpath_xmac_tx_stats_get(
1848
 
                                        &hldev->virtual_paths[i],
1849
 
                                        &xmac_stats->vpath_tx_stats[i]);
1850
 
                if (status != VXGE_HW_OK)
1851
 
                        goto exit;
1852
 
 
1853
 
                status = __vxge_hw_vpath_xmac_rx_stats_get(
1854
 
                                        &hldev->virtual_paths[i],
1855
 
                                        &xmac_stats->vpath_rx_stats[i]);
1856
 
                if (status != VXGE_HW_OK)
1857
 
                        goto exit;
1858
 
        }
1859
 
exit:
1860
 
        return status;
1861
 
}
1862
 
 
1863
 
/*
1864
 
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1865
 
 * This routine is used to dynamically change the debug output
1866
 
 */
1867
 
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1868
 
                              enum vxge_debug_level level, u32 mask)
1869
 
{
1870
 
        if (hldev == NULL)
1871
 
                return;
1872
 
 
1873
 
#if defined(VXGE_DEBUG_TRACE_MASK) || \
1874
 
        defined(VXGE_DEBUG_ERR_MASK)
1875
 
        hldev->debug_module_mask = mask;
1876
 
        hldev->debug_level = level;
1877
 
#endif
1878
 
 
1879
 
#if defined(VXGE_DEBUG_ERR_MASK)
1880
 
        hldev->level_err = level & VXGE_ERR;
1881
 
#endif
1882
 
 
1883
 
#if defined(VXGE_DEBUG_TRACE_MASK)
1884
 
        hldev->level_trace = level & VXGE_TRACE;
1885
 
#endif
1886
 
}
1887
 
 
1888
 
/*
1889
 
 * vxge_hw_device_error_level_get - Get the error level
1890
 
 * This routine returns the current error level set
1891
 
 */
1892
 
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1893
 
{
1894
 
#if defined(VXGE_DEBUG_ERR_MASK)
1895
 
        if (hldev == NULL)
1896
 
                return VXGE_ERR;
1897
 
        else
1898
 
                return hldev->level_err;
1899
 
#else
1900
 
        return 0;
1901
 
#endif
1902
 
}
1903
 
 
1904
 
/*
1905
 
 * vxge_hw_device_trace_level_get - Get the trace level
1906
 
 * This routine returns the current trace level set
1907
 
 */
1908
 
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1909
 
{
1910
 
#if defined(VXGE_DEBUG_TRACE_MASK)
1911
 
        if (hldev == NULL)
1912
 
                return VXGE_TRACE;
1913
 
        else
1914
 
                return hldev->level_trace;
1915
 
#else
1916
 
        return 0;
1917
 
#endif
1918
 
}
1919
 
 
1920
 
/*
1921
 
 * vxge_hw_getpause_data -Pause frame frame generation and reception.
1922
 
 * Returns the Pause frame generation and reception capability of the NIC.
1923
 
 */
1924
 
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1925
 
                                                 u32 port, u32 *tx, u32 *rx)
1926
 
{
1927
 
        u64 val64;
1928
 
        enum vxge_hw_status status = VXGE_HW_OK;
1929
 
 
1930
 
        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1931
 
                status = VXGE_HW_ERR_INVALID_DEVICE;
1932
 
                goto exit;
1933
 
        }
1934
 
 
1935
 
        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1936
 
                status = VXGE_HW_ERR_INVALID_PORT;
1937
 
                goto exit;
1938
 
        }
1939
 
 
1940
 
        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1941
 
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1942
 
                goto exit;
1943
 
        }
1944
 
 
1945
 
        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1946
 
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1947
 
                *tx = 1;
1948
 
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1949
 
                *rx = 1;
1950
 
exit:
1951
 
        return status;
1952
 
}
1953
 
 
1954
 
/*
1955
 
 * vxge_hw_device_setpause_data -  set/reset pause frame generation.
1956
 
 * It can be used to set or reset Pause frame generation or reception
1957
 
 * support of the NIC.
1958
 
 */
1959
 
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1960
 
                                                 u32 port, u32 tx, u32 rx)
1961
 
{
1962
 
        u64 val64;
1963
 
        enum vxge_hw_status status = VXGE_HW_OK;
1964
 
 
1965
 
        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1966
 
                status = VXGE_HW_ERR_INVALID_DEVICE;
1967
 
                goto exit;
1968
 
        }
1969
 
 
1970
 
        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1971
 
                status = VXGE_HW_ERR_INVALID_PORT;
1972
 
                goto exit;
1973
 
        }
1974
 
 
1975
 
        status = __vxge_hw_device_is_privilaged(hldev->host_type,
1976
 
                        hldev->func_id);
1977
 
        if (status != VXGE_HW_OK)
1978
 
                goto exit;
1979
 
 
1980
 
        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1981
 
        if (tx)
1982
 
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1983
 
        else
1984
 
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1985
 
        if (rx)
1986
 
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1987
 
        else
1988
 
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1989
 
 
1990
 
        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1991
 
exit:
1992
 
        return status;
1993
 
}
1994
 
 
1995
 
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1996
 
{
1997
 
        int link_width, exp_cap;
1998
 
        u16 lnk;
1999
 
 
2000
 
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
2001
 
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
2002
 
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
2003
 
        return link_width;
2004
 
}
2005
 
 
2006
 
/*
2007
 
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
2008
 
 * This function returns the index of memory block
2009
 
 */
2010
 
static inline u32
2011
 
__vxge_hw_ring_block_memblock_idx(u8 *block)
2012
 
{
2013
 
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
2014
 
}
2015
 
 
2016
 
/*
2017
 
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
2018
 
 * This function sets index to a memory block
2019
 
 */
2020
 
static inline void
2021
 
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
2022
 
{
2023
 
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
2024
 
}
2025
 
 
2026
 
/*
2027
 
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
2028
 
 * in RxD block
2029
 
 * Sets the next block pointer in RxD block
2030
 
 */
2031
 
static inline void
2032
 
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
2033
 
{
2034
 
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
2035
 
}
2036
 
 
2037
 
/*
2038
 
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
2039
 
 *             first block
2040
 
 * Returns the dma address of the first RxD block
2041
 
 */
2042
 
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
2043
 
{
2044
 
        struct vxge_hw_mempool_dma *dma_object;
2045
 
 
2046
 
        dma_object = ring->mempool->memblocks_dma_arr;
2047
 
        vxge_assert(dma_object != NULL);
2048
 
 
2049
 
        return dma_object->addr;
2050
 
}
2051
 
 
2052
 
/*
2053
 
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
2054
 
 * This function returns the dma address of a given item
2055
 
 */
2056
 
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
2057
 
                                               void *item)
2058
 
{
2059
 
        u32 memblock_idx;
2060
 
        void *memblock;
2061
 
        struct vxge_hw_mempool_dma *memblock_dma_object;
2062
 
        ptrdiff_t dma_item_offset;
2063
 
 
2064
 
        /* get owner memblock index */
2065
 
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
2066
 
 
2067
 
        /* get owner memblock by memblock index */
2068
 
        memblock = mempoolh->memblocks_arr[memblock_idx];
2069
 
 
2070
 
        /* get memblock DMA object by memblock index */
2071
 
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
2072
 
 
2073
 
        /* calculate offset in the memblock of this item */
2074
 
        dma_item_offset = (u8 *)item - (u8 *)memblock;
2075
 
 
2076
 
        return memblock_dma_object->addr + dma_item_offset;
2077
 
}
2078
 
 
2079
 
/*
2080
 
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
2081
 
 * This function returns the dma address of a given item
2082
 
 */
2083
 
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
2084
 
                                         struct __vxge_hw_ring *ring, u32 from,
2085
 
                                         u32 to)
2086
 
{
2087
 
        u8 *to_item , *from_item;
2088
 
        dma_addr_t to_dma;
2089
 
 
2090
 
        /* get "from" RxD block */
2091
 
        from_item = mempoolh->items_arr[from];
2092
 
        vxge_assert(from_item);
2093
 
 
2094
 
        /* get "to" RxD block */
2095
 
        to_item = mempoolh->items_arr[to];
2096
 
        vxge_assert(to_item);
2097
 
 
2098
 
        /* return address of the beginning of previous RxD block */
2099
 
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
2100
 
 
2101
 
        /* set next pointer for this RxD block to point on
2102
 
         * previous item's DMA start address */
2103
 
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
2104
 
}
2105
 
 
2106
 
/*
2107
 
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
2108
 
 * block callback
2109
 
 * This function is callback passed to __vxge_hw_mempool_create to create memory
2110
 
 * pool for RxD block
2111
 
 */
2112
 
static void
2113
 
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
2114
 
                                  u32 memblock_index,
2115
 
                                  struct vxge_hw_mempool_dma *dma_object,
2116
 
                                  u32 index, u32 is_last)
2117
 
{
2118
 
        u32 i;
2119
 
        void *item = mempoolh->items_arr[index];
2120
 
        struct __vxge_hw_ring *ring =
2121
 
                (struct __vxge_hw_ring *)mempoolh->userdata;
2122
 
 
2123
 
        /* format rxds array */
2124
 
        for (i = 0; i < ring->rxds_per_block; i++) {
2125
 
                void *rxdblock_priv;
2126
 
                void *uld_priv;
2127
 
                struct vxge_hw_ring_rxd_1 *rxdp;
2128
 
 
2129
 
                u32 reserve_index = ring->channel.reserve_ptr -
2130
 
                                (index * ring->rxds_per_block + i + 1);
2131
 
                u32 memblock_item_idx;
2132
 
 
2133
 
                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
2134
 
                                                i * ring->rxd_size;
2135
 
 
2136
 
                /* Note: memblock_item_idx is index of the item within
2137
 
                 *       the memblock. For instance, in case of three RxD-blocks
2138
 
                 *       per memblock this value can be 0, 1 or 2. */
2139
 
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
2140
 
                                        memblock_index, item,
2141
 
                                        &memblock_item_idx);
2142
 
 
2143
 
                rxdp = (struct vxge_hw_ring_rxd_1 *)
2144
 
                                ring->channel.reserve_arr[reserve_index];
2145
 
 
2146
 
                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
2147
 
 
2148
 
                /* pre-format Host_Control */
2149
 
                rxdp->host_control = (u64)(size_t)uld_priv;
2150
 
        }
2151
 
 
2152
 
        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
2153
 
 
2154
 
        if (is_last) {
2155
 
                /* link last one with first one */
2156
 
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
2157
 
        }
2158
 
 
2159
 
        if (index > 0) {
2160
 
                /* link this RxD block with previous one */
2161
 
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
2162
 
        }
2163
 
}
2164
 
 
2165
 
/*
2166
 
 * __vxge_hw_ring_replenish - Initial replenish of RxDs
2167
 
 * This function replenishes the RxDs from reserve array to work array
2168
 
 */
2169
 
enum vxge_hw_status
2170
 
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
2171
 
{
2172
 
        void *rxd;
2173
 
        struct __vxge_hw_channel *channel;
2174
 
        enum vxge_hw_status status = VXGE_HW_OK;
2175
 
 
2176
 
        channel = &ring->channel;
2177
 
 
2178
 
        while (vxge_hw_channel_dtr_count(channel) > 0) {
2179
 
 
2180
 
                status = vxge_hw_ring_rxd_reserve(ring, &rxd);
2181
 
 
2182
 
                vxge_assert(status == VXGE_HW_OK);
2183
 
 
2184
 
                if (ring->rxd_init) {
2185
 
                        status = ring->rxd_init(rxd, channel->userdata);
2186
 
                        if (status != VXGE_HW_OK) {
2187
 
                                vxge_hw_ring_rxd_free(ring, rxd);
2188
 
                                goto exit;
2189
 
                        }
2190
 
                }
2191
 
 
2192
 
                vxge_hw_ring_rxd_post(ring, rxd);
2193
 
        }
2194
 
        status = VXGE_HW_OK;
2195
 
exit:
2196
 
        return status;
2197
 
}
2198
 
 
2199
 
/*
2200
 
 * __vxge_hw_channel_allocate - Allocate memory for channel
2201
 
 * This function allocates required memory for the channel and various arrays
2202
 
 * in the channel
2203
 
 */
2204
 
static struct __vxge_hw_channel *
2205
 
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2206
 
                           enum __vxge_hw_channel_type type,
2207
 
                           u32 length, u32 per_dtr_space,
2208
 
                           void *userdata)
2209
 
{
2210
 
        struct __vxge_hw_channel *channel;
2211
 
        struct __vxge_hw_device *hldev;
2212
 
        int size = 0;
2213
 
        u32 vp_id;
2214
 
 
2215
 
        hldev = vph->vpath->hldev;
2216
 
        vp_id = vph->vpath->vp_id;
2217
 
 
2218
 
        switch (type) {
2219
 
        case VXGE_HW_CHANNEL_TYPE_FIFO:
2220
 
                size = sizeof(struct __vxge_hw_fifo);
2221
 
                break;
2222
 
        case VXGE_HW_CHANNEL_TYPE_RING:
2223
 
                size = sizeof(struct __vxge_hw_ring);
2224
 
                break;
2225
 
        default:
2226
 
                break;
2227
 
        }
2228
 
 
2229
 
        channel = kzalloc(size, GFP_KERNEL);
2230
 
        if (channel == NULL)
2231
 
                goto exit0;
2232
 
        INIT_LIST_HEAD(&channel->item);
2233
 
 
2234
 
        channel->common_reg = hldev->common_reg;
2235
 
        channel->first_vp_id = hldev->first_vp_id;
2236
 
        channel->type = type;
2237
 
        channel->devh = hldev;
2238
 
        channel->vph = vph;
2239
 
        channel->userdata = userdata;
2240
 
        channel->per_dtr_space = per_dtr_space;
2241
 
        channel->length = length;
2242
 
        channel->vp_id = vp_id;
2243
 
 
2244
 
        channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2245
 
        if (channel->work_arr == NULL)
2246
 
                goto exit1;
2247
 
 
2248
 
        channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2249
 
        if (channel->free_arr == NULL)
2250
 
                goto exit1;
2251
 
        channel->free_ptr = length;
2252
 
 
2253
 
        channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2254
 
        if (channel->reserve_arr == NULL)
2255
 
                goto exit1;
2256
 
        channel->reserve_ptr = length;
2257
 
        channel->reserve_top = 0;
2258
 
 
2259
 
        channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2260
 
        if (channel->orig_arr == NULL)
2261
 
                goto exit1;
2262
 
 
2263
 
        return channel;
2264
 
exit1:
2265
 
        __vxge_hw_channel_free(channel);
2266
 
 
2267
 
exit0:
2268
 
        return NULL;
2269
 
}
2270
 
 
2271
 
/*
2272
 
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2273
 
 * Adds a block to block pool
2274
 
 */
2275
 
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2276
 
                                        void *block_addr,
2277
 
                                        u32 length,
2278
 
                                        struct pci_dev *dma_h,
2279
 
                                        struct pci_dev *acc_handle)
2280
 
{
2281
 
        struct __vxge_hw_blockpool *blockpool;
2282
 
        struct __vxge_hw_blockpool_entry *entry = NULL;
2283
 
        dma_addr_t dma_addr;
2284
 
        enum vxge_hw_status status = VXGE_HW_OK;
2285
 
        u32 req_out;
2286
 
 
2287
 
        blockpool = &devh->block_pool;
2288
 
 
2289
 
        if (block_addr == NULL) {
2290
 
                blockpool->req_out--;
2291
 
                status = VXGE_HW_FAIL;
2292
 
                goto exit;
2293
 
        }
2294
 
 
2295
 
        dma_addr = pci_map_single(devh->pdev, block_addr, length,
2296
 
                                PCI_DMA_BIDIRECTIONAL);
2297
 
 
2298
 
        if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2299
 
                vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2300
 
                blockpool->req_out--;
2301
 
                status = VXGE_HW_FAIL;
2302
 
                goto exit;
2303
 
        }
2304
 
 
2305
 
        if (!list_empty(&blockpool->free_entry_list))
2306
 
                entry = (struct __vxge_hw_blockpool_entry *)
2307
 
                        list_first_entry(&blockpool->free_entry_list,
2308
 
                                struct __vxge_hw_blockpool_entry,
2309
 
                                item);
2310
 
 
2311
 
        if (entry == NULL)
2312
 
                entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
2313
 
        else
2314
 
                list_del(&entry->item);
2315
 
 
2316
 
        if (entry != NULL) {
2317
 
                entry->length = length;
2318
 
                entry->memblock = block_addr;
2319
 
                entry->dma_addr = dma_addr;
2320
 
                entry->acc_handle = acc_handle;
2321
 
                entry->dma_handle = dma_h;
2322
 
                list_add(&entry->item, &blockpool->free_block_list);
2323
 
                blockpool->pool_size++;
2324
 
                status = VXGE_HW_OK;
2325
 
        } else
2326
 
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
2327
 
 
2328
 
        blockpool->req_out--;
2329
 
 
2330
 
        req_out = blockpool->req_out;
2331
 
exit:
2332
 
        return;
2333
 
}
2334
 
 
2335
 
static inline void
2336
 
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
2337
 
{
2338
 
        gfp_t flags;
2339
 
        void *vaddr;
2340
 
 
2341
 
        if (in_interrupt())
2342
 
                flags = GFP_ATOMIC | GFP_DMA;
2343
 
        else
2344
 
                flags = GFP_KERNEL | GFP_DMA;
2345
 
 
2346
 
        vaddr = kmalloc((size), flags);
2347
 
 
2348
 
        vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
2349
 
}
2350
 
 
2351
 
/*
2352
 
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
2353
 
 */
2354
 
static
2355
 
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2356
 
{
2357
 
        u32 nreq = 0, i;
2358
 
 
2359
 
        if ((blockpool->pool_size  +  blockpool->req_out) <
2360
 
                VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2361
 
                nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2362
 
                blockpool->req_out += nreq;
2363
 
        }
2364
 
 
2365
 
        for (i = 0; i < nreq; i++)
2366
 
                vxge_os_dma_malloc_async(
2367
 
                        ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2368
 
                        blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2369
 
}
2370
 
 
2371
 
/*
2372
 
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
2373
 
 * Allocates a block of memory of given size, either from block pool
2374
 
 * or by calling vxge_os_dma_malloc()
2375
 
 */
2376
 
static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2377
 
                                        struct vxge_hw_mempool_dma *dma_object)
2378
 
{
2379
 
        struct __vxge_hw_blockpool_entry *entry = NULL;
2380
 
        struct __vxge_hw_blockpool  *blockpool;
2381
 
        void *memblock = NULL;
2382
 
        enum vxge_hw_status status = VXGE_HW_OK;
2383
 
 
2384
 
        blockpool = &devh->block_pool;
2385
 
 
2386
 
        if (size != blockpool->block_size) {
2387
 
 
2388
 
                memblock = vxge_os_dma_malloc(devh->pdev, size,
2389
 
                                                &dma_object->handle,
2390
 
                                                &dma_object->acc_handle);
2391
 
 
2392
 
                if (memblock == NULL) {
2393
 
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
2394
 
                        goto exit;
2395
 
                }
2396
 
 
2397
 
                dma_object->addr = pci_map_single(devh->pdev, memblock, size,
2398
 
                                        PCI_DMA_BIDIRECTIONAL);
2399
 
 
2400
 
                if (unlikely(pci_dma_mapping_error(devh->pdev,
2401
 
                                dma_object->addr))) {
2402
 
                        vxge_os_dma_free(devh->pdev, memblock,
2403
 
                                &dma_object->acc_handle);
2404
 
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
2405
 
                        goto exit;
2406
 
                }
2407
 
 
2408
 
        } else {
2409
 
 
2410
 
                if (!list_empty(&blockpool->free_block_list))
2411
 
                        entry = (struct __vxge_hw_blockpool_entry *)
2412
 
                                list_first_entry(&blockpool->free_block_list,
2413
 
                                        struct __vxge_hw_blockpool_entry,
2414
 
                                        item);
2415
 
 
2416
 
                if (entry != NULL) {
2417
 
                        list_del(&entry->item);
2418
 
                        dma_object->addr = entry->dma_addr;
2419
 
                        dma_object->handle = entry->dma_handle;
2420
 
                        dma_object->acc_handle = entry->acc_handle;
2421
 
                        memblock = entry->memblock;
2422
 
 
2423
 
                        list_add(&entry->item,
2424
 
                                &blockpool->free_entry_list);
2425
 
                        blockpool->pool_size--;
2426
 
                }
2427
 
 
2428
 
                if (memblock != NULL)
2429
 
                        __vxge_hw_blockpool_blocks_add(blockpool);
2430
 
        }
2431
 
exit:
2432
 
        return memblock;
2433
 
}
2434
 
 
2435
 
/*
2436
 
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
2437
 
 */
2438
 
static void
2439
 
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2440
 
{
2441
 
        struct list_head *p, *n;
2442
 
 
2443
 
        list_for_each_safe(p, n, &blockpool->free_block_list) {
2444
 
 
2445
 
                if (blockpool->pool_size < blockpool->pool_max)
2446
 
                        break;
2447
 
 
2448
 
                pci_unmap_single(
2449
 
                        ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2450
 
                        ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2451
 
                        ((struct __vxge_hw_blockpool_entry *)p)->length,
2452
 
                        PCI_DMA_BIDIRECTIONAL);
2453
 
 
2454
 
                vxge_os_dma_free(
2455
 
                        ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2456
 
                        ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2457
 
                        &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2458
 
 
2459
 
                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
2460
 
 
2461
 
                list_add(p, &blockpool->free_entry_list);
2462
 
 
2463
 
                blockpool->pool_size--;
2464
 
 
2465
 
        }
2466
 
}
2467
 
 
2468
 
/*
2469
 
 * __vxge_hw_blockpool_free - Frees the memory allcoated with
2470
 
 *                              __vxge_hw_blockpool_malloc
2471
 
 */
2472
 
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2473
 
                                     void *memblock, u32 size,
2474
 
                                     struct vxge_hw_mempool_dma *dma_object)
2475
 
{
2476
 
        struct __vxge_hw_blockpool_entry *entry = NULL;
2477
 
        struct __vxge_hw_blockpool  *blockpool;
2478
 
        enum vxge_hw_status status = VXGE_HW_OK;
2479
 
 
2480
 
        blockpool = &devh->block_pool;
2481
 
 
2482
 
        if (size != blockpool->block_size) {
2483
 
                pci_unmap_single(devh->pdev, dma_object->addr, size,
2484
 
                        PCI_DMA_BIDIRECTIONAL);
2485
 
                vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2486
 
        } else {
2487
 
 
2488
 
                if (!list_empty(&blockpool->free_entry_list))
2489
 
                        entry = (struct __vxge_hw_blockpool_entry *)
2490
 
                                list_first_entry(&blockpool->free_entry_list,
2491
 
                                        struct __vxge_hw_blockpool_entry,
2492
 
                                        item);
2493
 
 
2494
 
                if (entry == NULL)
2495
 
                        entry = vmalloc(sizeof(
2496
 
                                        struct __vxge_hw_blockpool_entry));
2497
 
                else
2498
 
                        list_del(&entry->item);
2499
 
 
2500
 
                if (entry != NULL) {
2501
 
                        entry->length = size;
2502
 
                        entry->memblock = memblock;
2503
 
                        entry->dma_addr = dma_object->addr;
2504
 
                        entry->acc_handle = dma_object->acc_handle;
2505
 
                        entry->dma_handle = dma_object->handle;
2506
 
                        list_add(&entry->item,
2507
 
                                        &blockpool->free_block_list);
2508
 
                        blockpool->pool_size++;
2509
 
                        status = VXGE_HW_OK;
2510
 
                } else
2511
 
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
2512
 
 
2513
 
                if (status == VXGE_HW_OK)
2514
 
                        __vxge_hw_blockpool_blocks_remove(blockpool);
2515
 
        }
2516
 
}
2517
 
 
2518
 
/*
2519
 
 * vxge_hw_mempool_destroy
2520
 
 */
2521
 
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2522
 
{
2523
 
        u32 i, j;
2524
 
        struct __vxge_hw_device *devh = mempool->devh;
2525
 
 
2526
 
        for (i = 0; i < mempool->memblocks_allocated; i++) {
2527
 
                struct vxge_hw_mempool_dma *dma_object;
2528
 
 
2529
 
                vxge_assert(mempool->memblocks_arr[i]);
2530
 
                vxge_assert(mempool->memblocks_dma_arr + i);
2531
 
 
2532
 
                dma_object = mempool->memblocks_dma_arr + i;
2533
 
 
2534
 
                for (j = 0; j < mempool->items_per_memblock; j++) {
2535
 
                        u32 index = i * mempool->items_per_memblock + j;
2536
 
 
2537
 
                        /* to skip last partially filled(if any) memblock */
2538
 
                        if (index >= mempool->items_current)
2539
 
                                break;
2540
 
                }
2541
 
 
2542
 
                vfree(mempool->memblocks_priv_arr[i]);
2543
 
 
2544
 
                __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2545
 
                                mempool->memblock_size, dma_object);
2546
 
        }
2547
 
 
2548
 
        vfree(mempool->items_arr);
2549
 
        vfree(mempool->memblocks_dma_arr);
2550
 
        vfree(mempool->memblocks_priv_arr);
2551
 
        vfree(mempool->memblocks_arr);
2552
 
        vfree(mempool);
2553
 
}
2554
 
 
2555
 
/*
2556
 
 * __vxge_hw_mempool_grow
2557
 
 * Will resize mempool up to %num_allocate value.
2558
 
 */
2559
 
static enum vxge_hw_status
2560
 
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2561
 
                       u32 *num_allocated)
2562
 
{
2563
 
        u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2564
 
        u32 n_items = mempool->items_per_memblock;
2565
 
        u32 start_block_idx = mempool->memblocks_allocated;
2566
 
        u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2567
 
        enum vxge_hw_status status = VXGE_HW_OK;
2568
 
 
2569
 
        *num_allocated = 0;
2570
 
 
2571
 
        if (end_block_idx > mempool->memblocks_max) {
2572
 
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
2573
 
                goto exit;
2574
 
        }
2575
 
 
2576
 
        for (i = start_block_idx; i < end_block_idx; i++) {
2577
 
                u32 j;
2578
 
                u32 is_last = ((end_block_idx - 1) == i);
2579
 
                struct vxge_hw_mempool_dma *dma_object =
2580
 
                        mempool->memblocks_dma_arr + i;
2581
 
                void *the_memblock;
2582
 
 
2583
 
                /* allocate memblock's private part. Each DMA memblock
2584
 
                 * has a space allocated for item's private usage upon
2585
 
                 * mempool's user request. Each time mempool grows, it will
2586
 
                 * allocate new memblock and its private part at once.
2587
 
                 * This helps to minimize memory usage a lot. */
2588
 
                mempool->memblocks_priv_arr[i] =
2589
 
                                vzalloc(mempool->items_priv_size * n_items);
2590
 
                if (mempool->memblocks_priv_arr[i] == NULL) {
2591
 
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
2592
 
                        goto exit;
2593
 
                }
2594
 
 
2595
 
                /* allocate DMA-capable memblock */
2596
 
                mempool->memblocks_arr[i] =
2597
 
                        __vxge_hw_blockpool_malloc(mempool->devh,
2598
 
                                mempool->memblock_size, dma_object);
2599
 
                if (mempool->memblocks_arr[i] == NULL) {
2600
 
                        vfree(mempool->memblocks_priv_arr[i]);
2601
 
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
2602
 
                        goto exit;
2603
 
                }
2604
 
 
2605
 
                (*num_allocated)++;
2606
 
                mempool->memblocks_allocated++;
2607
 
 
2608
 
                memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2609
 
 
2610
 
                the_memblock = mempool->memblocks_arr[i];
2611
 
 
2612
 
                /* fill the items hash array */
2613
 
                for (j = 0; j < n_items; j++) {
2614
 
                        u32 index = i * n_items + j;
2615
 
 
2616
 
                        if (first_time && index >= mempool->items_initial)
2617
 
                                break;
2618
 
 
2619
 
                        mempool->items_arr[index] =
2620
 
                                ((char *)the_memblock + j*mempool->item_size);
2621
 
 
2622
 
                        /* let caller to do more job on each item */
2623
 
                        if (mempool->item_func_alloc != NULL)
2624
 
                                mempool->item_func_alloc(mempool, i,
2625
 
                                        dma_object, index, is_last);
2626
 
 
2627
 
                        mempool->items_current = index + 1;
2628
 
                }
2629
 
 
2630
 
                if (first_time && mempool->items_current ==
2631
 
                                        mempool->items_initial)
2632
 
                        break;
2633
 
        }
2634
 
exit:
2635
 
        return status;
2636
 
}
2637
 
 
2638
 
/*
 * vxge_hw_mempool_create
 * This function creates a memory pool object. The pool may grow but will
 * never shrink. The pool consists of a number of dynamically allocated
 * blocks, each large enough to hold %items_initial items. The memory is
 * DMA-able, but the client must map/unmap it before handing it to the
 * device.
 */
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
                         u32 memblock_size,
                         u32 item_size,
                         u32 items_priv_size,
                         u32 items_initial,
                         u32 items_max,
                         struct vxge_hw_mempool_cbs *mp_callback,
                         void *userdata)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 memblocks_to_allocate;
        struct vxge_hw_mempool *mempool = NULL;
        u32 allocated;

        if (memblock_size < item_size) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        mempool = vzalloc(sizeof(struct vxge_hw_mempool));
        if (mempool == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        mempool->devh                   = devh;
        mempool->memblock_size          = memblock_size;
        mempool->items_max              = items_max;
        mempool->items_initial          = items_initial;
        mempool->item_size              = item_size;
        mempool->items_priv_size        = items_priv_size;
        mempool->item_func_alloc        = mp_callback->item_func_alloc;
        mempool->userdata               = userdata;

        mempool->memblocks_allocated = 0;

        mempool->items_per_memblock = memblock_size / item_size;

        mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
                                        mempool->items_per_memblock;
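        /*
         * The expression above is the usual integer ceiling division: for
         * example, with items_max = 10 and items_per_memblock = 4 it yields
         * (10 + 4 - 1) / 4 = 3 memory blocks, the last one only partly used.
         */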

        /* allocate array of memblocks */
        mempool->memblocks_arr =
                vzalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }

        /* allocate array of per-memblock item private areas */
        mempool->memblocks_priv_arr =
                vzalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_priv_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }

        /* allocate array of memblock DMA objects */
        mempool->memblocks_dma_arr =
                vzalloc(sizeof(struct vxge_hw_mempool_dma) *
                        mempool->memblocks_max);
        if (mempool->memblocks_dma_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }

        /* allocate hash array of items */
        mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
        if (mempool->items_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }

        /* calculate initial number of memblocks */
        memblocks_to_allocate = (mempool->items_initial +
                                 mempool->items_per_memblock - 1) /
                                                mempool->items_per_memblock;

        /* pre-allocate the mempool */
        status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
                                        &allocated);
        if (status != VXGE_HW_OK) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }

exit:
        return mempool;
}
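
/*
 * A minimal usage sketch (illustration only, not part of the driver): the
 * ring code later in this file builds its RxD-block pool roughly like this,
 * with one hardware block per item and a per-item init callback.  The
 * priv_size and n_blocks values here are placeholders.
 *
 *      struct vxge_hw_mempool_cbs cbs;
 *      struct vxge_hw_mempool *pool;
 *
 *      cbs.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
 *      pool = __vxge_hw_mempool_create(hldev,
 *                      VXGE_HW_BLOCK_SIZE, VXGE_HW_BLOCK_SIZE,
 *                      priv_size, n_blocks, n_blocks, &cbs, ring);
 *      if (pool == NULL)
 *              status = VXGE_HW_ERR_OUT_OF_MEMORY;
 */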

/*
 * __vxge_hw_ring_abort - Returns the RxD
 * This function terminates the RxDs of the ring
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        for (;;) {
                vxge_hw_channel_dtr_try_complete(channel, &rxdh);

                if (rxdh == NULL)
                        break;

                vxge_hw_channel_dtr_complete(channel);

                if (ring->rxd_term)
                        ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
                                channel->userdata);

                vxge_hw_channel_dtr_free(channel, rxdh);
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during the vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        __vxge_hw_ring_abort(ring);

        status = __vxge_hw_channel_reset(channel);

        if (status != VXGE_HW_OK)
                goto exit;

        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_ring *ring = vp->vpath->ringh;

        __vxge_hw_ring_abort(ring);

        if (ring->mempool)
                __vxge_hw_mempool_destroy(ring->mempool);

        vp->vpath->ringh = NULL;
        __vxge_hw_channel_free(&ring->channel);

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_create - Create a ring
 * This function creates a ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_ring *ring;
        u32 ring_length;
        struct vxge_hw_ring_config *config;
        struct __vxge_hw_device *hldev;
        u32 vp_id;
        struct vxge_hw_mempool_cbs ring_mp_callback;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        hldev = vp->vpath->hldev;
        vp_id = vp->vpath->vp_id;

        config = &hldev->config.vp_config[vp_id].ring;

        ring_length = config->ring_blocks *
                        vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
                                                VXGE_HW_CHANNEL_TYPE_RING,
                                                ring_length,
                                                attr->per_rxd_space,
                                                attr->userdata);
        if (ring == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vp->vpath->ringh = ring;
        ring->vp_id = vp_id;
        ring->vp_reg = vp->vpath->vp_reg;
        ring->common_reg = hldev->common_reg;
        ring->stats = &vp->vpath->sw_stats->ring_stats;
        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
        ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
        ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
        ring->rxds_limit = config->rxds_limit;

        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
                sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

        ring->rxd_priv_size =
                ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
                VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
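        /*
         * The arithmetic above rounds the per-RxD private area up to a
         * whole number of cache lines: with a 64-byte cache line, for
         * example, a 72-byte area becomes ((72 + 63) / 64) * 64 = 128.
         */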

        /* how many RxDs can fit into one block. Depends on the configured
         * buffer_mode. */
        ring->rxds_per_block =
                vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        /* calculate actual RxD block private size */
        ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
        ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
        ring->mempool = __vxge_hw_mempool_create(hldev,
                                VXGE_HW_BLOCK_SIZE,
                                VXGE_HW_BLOCK_SIZE,
                                ring->rxdblock_priv_size,
                                ring->config->ring_blocks,
                                ring->config->ring_blocks,
                                &ring_mp_callback,
                                ring);
        if (ring->mempool == NULL) {
                __vxge_hw_ring_delete(vp);
                return VXGE_HW_ERR_OUT_OF_MEMORY;
        }

        status = __vxge_hw_channel_initialize(&ring->channel);
        if (status != VXGE_HW_OK) {
                __vxge_hw_ring_delete(vp);
                goto exit;
        }

        /* Note:
         * Specifying the rxd_init callback means two things:
         * 1) RxDs need to be initialized by the driver at channel-open time;
         * 2) RxDs need to be posted at channel-open time
         *    (that's what the initial replenish below does).
         * Currently we don't have a case where 1) is done without 2).
         */
        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK) {
                        __vxge_hw_ring_delete(vp);
                        goto exit;
                }
        }

        /* the initial replenish increments the counter in its post()
         * routine, so it has to be reset here */
        ring->stats->common_stats.usage_cnt = 0;
exit:
        return status;
}

/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize the Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
        u32 i;

        device_config->dma_blockpool_initial =
                                        VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
        device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
        device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
        device_config->rth_en = VXGE_HW_RTH_DEFAULT;
        device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
        device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
        device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                device_config->vp_config[i].vp_id = i;

                device_config->vp_config[i].min_bandwidth =
                                VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

                device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;

                device_config->vp_config[i].ring.ring_blocks =
                                VXGE_HW_DEF_RING_BLOCKS;

                device_config->vp_config[i].ring.buffer_mode =
                                VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;

                device_config->vp_config[i].ring.scatter_mode =
                                VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;

                device_config->vp_config[i].ring.rxds_limit =
                                VXGE_HW_DEF_RING_RXDS_LIMIT;

                device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;

                device_config->vp_config[i].fifo.fifo_blocks =
                                VXGE_HW_MIN_FIFO_BLOCKS;

                device_config->vp_config[i].fifo.max_frags =
                                VXGE_HW_MAX_FIFO_FRAGS;

                device_config->vp_config[i].fifo.memblock_size =
                                VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;

                device_config->vp_config[i].fifo.alignment_size =
                                VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;

                device_config->vp_config[i].fifo.intr =
                                VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;

                device_config->vp_config[i].fifo.no_snoop_bits =
                                VXGE_HW_FIFO_NO_SNOOP_DEFAULT;

                device_config->vp_config[i].tti.intr_enable =
                                VXGE_HW_TIM_INTR_DEFAULT;

                device_config->vp_config[i].tti.btimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.timer_ac_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.timer_ci_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.timer_ri_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.rtimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.util_sel =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.ltimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.urange_a =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.uec_a =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.urange_b =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.uec_b =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.urange_c =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.uec_c =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].tti.uec_d =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.intr_enable =
                                VXGE_HW_TIM_INTR_DEFAULT;

                device_config->vp_config[i].rti.btimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.timer_ac_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.timer_ci_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.timer_ri_en =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.rtimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.util_sel =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.ltimer_val =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.urange_a =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.uec_a =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.urange_b =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.uec_b =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.urange_c =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.uec_c =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].rti.uec_d =
                                VXGE_HW_USE_FLASH_DEFAULT;

                device_config->vp_config[i].mtu =
                                VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

                device_config->vp_config[i].rpa_strip_vlan_tag =
                        VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
        u64 val64;

        val64 = readq(&vpath_reg->vpath_general_cfg1);
        wmb();
        val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
        writeq(val64, &vpath_reg->vpath_general_cfg1);
        wmb();
#endif
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
                           struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
        u64 val64;

        val64 = readq(&legacy_reg->pifm_wr_swap_en);

        if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
                val64 = readq(&vpath_reg->kdfcctl_cfg0);
                wmb();

                val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
                        VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
                        VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

                writeq(val64, &vpath_reg->kdfcctl_cfg0);
                wmb();
        }

        return VXGE_HW_OK;
}

/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
                      enum vxge_hw_mgmt_reg_type type,
                      u32 index, u32 offset, u64 *value)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        switch (type) {
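        /*
         * In every case below the offset is validated against the size of
         * the corresponding register block: readq() transfers 8 bytes, so
         * any offset beyond sizeof(block) - 8 would read past its end.
         */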
        case vxge_hw_mgmt_reg_type_legacy:
                if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->legacy_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_toc:
                if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->toc_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_common:
                if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->common_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_mrpcim:
                if (!(hldev->access_rights &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                        status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_srpcim:
                if (!(hldev->access_rights &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
                        status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                        break;
                }
                if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->srpcim_reg[index] +
                                offset);
                break;
        case vxge_hw_mgmt_reg_type_vpmgmt:
                if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
                        (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
                                offset);
                break;
        case vxge_hw_mgmt_reg_type_vpath:
                if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
                        (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                *value = readq((void __iomem *)hldev->vpath_reg[index] +
                                offset);
                break;
        default:
                status = VXGE_HW_ERR_INVALID_TYPE;
                break;
        }

exit:
        return status;
}
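
/*
 * A minimal usage sketch (illustration only): reading the first 64-bit word
 * of the legacy register block.  The index argument is ignored for
 * non-indexed register spaces such as this one.
 *
 *      u64 val;
 *
 *      if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_legacy,
 *                                0, 0, &val) != VXGE_HW_OK)
 *              goto err;
 */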

/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
        struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i = 0, j = 0;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((vpath_mask) & vxge_mBIT(i)))
                        continue;
                vpmgmt_reg = hldev->vpmgmt_reg[i];
                for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
                        if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
                        & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
                                return VXGE_HW_FAIL;
                }
        }
        return status;
}

/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
                      enum vxge_hw_mgmt_reg_type type,
                      u32 index, u32 offset, u64 value)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        switch (type) {
        case vxge_hw_mgmt_reg_type_legacy:
                if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->legacy_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_toc:
                if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->toc_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_common:
                if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->common_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_mrpcim:
                if (!(hldev->access_rights &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                        status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
                break;
        case vxge_hw_mgmt_reg_type_srpcim:
                if (!(hldev->access_rights &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
                        status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                        break;
                }
                if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
                        offset);

                break;
        case vxge_hw_mgmt_reg_type_vpmgmt:
                if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
                        (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
                        offset);
                break;
        case vxge_hw_mgmt_reg_type_vpath:
                if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
                        (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
                        status = VXGE_HW_ERR_INVALID_INDEX;
                        break;
                }
                if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
                        status = VXGE_HW_ERR_INVALID_OFFSET;
                        break;
                }
                writeq(value, (void __iomem *)hldev->vpath_reg[index] +
                        offset);
                break;
        default:
                status = VXGE_HW_ERR_INVALID_TYPE;
                break;
        }
exit:
        return status;
}

/*
 * __vxge_hw_fifo_abort - Returns the TxD
 * This function terminates the TxDs of the fifo
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
        void *txdlh;

        for (;;) {
                vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

                if (txdlh == NULL)
                        break;

                vxge_hw_channel_dtr_complete(&fifo->channel);

                if (fifo->txdl_term) {
                        fifo->txdl_term(txdlh,
                        VXGE_HW_TXDL_STATE_POSTED,
                        fifo->channel.userdata);
                }

                vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during the vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        __vxge_hw_fifo_abort(fifo);
        status = __vxge_hw_channel_reset(&fifo->channel);

        return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

        __vxge_hw_fifo_abort(fifo);

        if (fifo->mempool)
                __vxge_hw_mempool_destroy(fifo->mempool);

        vp->vpath->fifoh = NULL;

        __vxge_hw_channel_free(&fifo->channel);

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * list callback
 * This function is a callback passed to __vxge_hw_mempool_create to create
 * the memory pool for the TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
        struct vxge_hw_mempool *mempoolh,
        u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
        u32 index, u32 is_last)
{
        u32 memblock_item_idx;
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp =
                (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
        struct __vxge_hw_fifo *fifo =
                        (struct __vxge_hw_fifo *)mempoolh->userdata;
        void *memblock = mempoolh->memblocks_arr[memblock_index];

        vxge_assert(txdp);

        txdp->host_control = (u64) (size_t)
        __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
                                        &memblock_item_idx);

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

        vxge_assert(txdl_priv);

        fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

        /* pre-format HW's TxDL's private */
        txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
        txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
        txdl_priv->dma_handle = dma_object->handle;
        txdl_priv->memblock   = memblock;
        txdl_priv->first_txdp = txdp;
        txdl_priv->next_txdl_priv = NULL;
        txdl_priv->alloc_frags = 0;
}

/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates a FIFO and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_fifo_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_fifo *fifo;
        struct vxge_hw_fifo_config *config;
        u32 txdl_size, txdl_per_memblock;
        struct vxge_hw_mempool_cbs fifo_mp_callback;
        struct __vxge_hw_virtualpath *vpath;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
        vpath = vp->vpath;
        config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

        txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

        txdl_per_memblock = config->memblock_size / txdl_size;
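        /*
         * Integer division: only whole TxD lists fit in a memory block.
         * For illustration, a hypothetical 8192-byte memblock and a
         * 544-byte TxDL give 15 TxDLs per block, with the 32-byte
         * remainder left unused.
         */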

        fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
                                        VXGE_HW_CHANNEL_TYPE_FIFO,
                                        config->fifo_blocks * txdl_per_memblock,
                                        attr->per_txdl_space, attr->userdata);

        if (fifo == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vpath->fifoh = fifo;
        fifo->nofl_db = vpath->nofl_db;

        fifo->vp_id = vpath->vp_id;
        fifo->vp_reg = vpath->vp_reg;
        fifo->stats = &vpath->sw_stats->fifo_stats;

        fifo->config = config;

        /* apply "interrupts per txdl" attribute */
        fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
        fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
        fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;

        if (fifo->config->intr)
                fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

        fifo->no_snoop_bits = config->no_snoop_bits;

        /*
         * FIFO memory management strategy:
         *
         * A TxDL is split into three independent parts:
         *      - the set of TxDs
         *      - the TxD HW private part
         *      - the driver private part
         *
         * Adaptive memory allocation is used, i.e. memory is allocated on
         * demand with a size that fits into one memory block.
         * One memory block may contain more than one TxDL.
         *
         * During "reserve" operations more memory can be allocated on
         * demand, for example due to a FIFO full condition.
         *
         * The pool of memblocks never shrinks except in the
         * __vxge_hw_fifo_close routine, which essentially stops the
         * channel and frees the resources.
         */

        /* TxDL common private size == TxDL private  +  driver private */
        fifo->priv_size =
                sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
        fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
                        VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

        fifo->per_txdl_space = attr->per_txdl_space;

        /* recompute txdl size to be cacheline aligned */
        fifo->txdl_size = txdl_size;
        fifo->txdl_per_memblock = txdl_per_memblock;

        fifo->txdl_term = attr->txdl_term;
        fifo->callback = attr->callback;

        if (fifo->txdl_per_memblock == 0) {
                __vxge_hw_fifo_delete(vp);
                status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
                goto exit;
        }

        fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

        fifo->mempool =
                __vxge_hw_mempool_create(vpath->hldev,
                        fifo->config->memblock_size,
                        fifo->txdl_size,
                        fifo->priv_size,
                        (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
                        (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
                        &fifo_mp_callback,
                        fifo);

        if (fifo->mempool == NULL) {
                __vxge_hw_fifo_delete(vp);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_channel_initialize(&fifo->channel);
        if (status != VXGE_HW_OK) {
                __vxge_hw_fifo_delete(vp);
                goto exit;
        }

        vxge_assert(fifo->channel.reserve_ptr);
exit:
        return status;
}

/*
 * __vxge_hw_vpath_pci_read - Read the content of a given address
 *                          in PCI config space.
 * Read from the vpath PCI config space.
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
                         u32 phy_func_0, u32 offset, u32 *val)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

        val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

        if (phy_func_0)
                val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

        writeq(val64, &vp_reg->pci_config_access_cfg1);
        wmb();
        writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
                        &vp_reg->pci_config_access_cfg2);
        wmb();

        status = __vxge_hw_device_register_poll(
                        &vp_reg->pci_config_access_cfg2,
                        VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->pci_config_access_status);

        if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
                status = VXGE_HW_FAIL;
                *val = 0;
        } else
                *val = (u32)vxge_bVALn(val64, 32, 32);
exit:
        return status;
}

/**
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE to turn flickering on, FALSE to turn it off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
        struct __vxge_hw_virtualpath *vpath;
        u64 data0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        if (hldev == NULL) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        data0 = on_off;
        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
exit:
        return status;
}
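
/*
 * A minimal usage sketch (illustration only): an "identify adapter" hook
 * could blink the LED briefly and then stop it.  The 1/0 on_off values and
 * the one-second delay are assumptions, not taken from this driver.
 *
 *      vxge_hw_device_flick_link_led(hldev, 1);        blink on
 *      msleep(1000);
 *      vxge_hw_device_flick_link_led(hldev, 0);        blink off
 */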

/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
                              u32 action, u32 rts_table, u32 offset,
                              u64 *data0, u64 *data1)
{
        enum vxge_hw_status status;
        u64 steer_ctrl = 0;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        if ((rts_table ==
             VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
            (rts_table ==
             VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
            (rts_table ==
             VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
            (rts_table ==
             VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
                steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
        }

        status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
                                      data0, data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                goto exit;

        if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
            (rts_table !=
             VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
                *data1 = 0;
exit:
        return status;
}

/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
                              u32 rts_table, u32 offset, u64 steer_data0,
                              u64 steer_data1)
{
        u64 data0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        data0 = steer_data0;

        if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
            (rts_table ==
             VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
                data1 = steer_data1;

        status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
                                      &data0, &data1, &steer_ctrl);
exit:
        return status;
}

/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
                        struct __vxge_hw_vpath_handle *vp,
                        enum vxge_hw_rth_algoritms algorithm,
                        struct vxge_hw_rth_hash_types *hash_type,
                        u16 bucket_size)
{
        u64 data0, data1;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
                        0, &data0, &data1);
        if (status != VXGE_HW_OK)
                goto exit;

        data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

        if (hash_type->hash_type_tcpipv4_en)
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

        if (hash_type->hash_type_ipv4_en)
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

        if (hash_type->hash_type_tcpipv6_en)
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

        if (hash_type->hash_type_ipv6_en)
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

        if (hash_type->hash_type_tcpipv6ex_en)
                data0 |=
                VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

        if (hash_type->hash_type_ipv6ex_en)
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

        if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
                data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
        else
                data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

        status = __vxge_hw_vpath_rts_table_set(vp,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
                0, data0, 0);
exit:
        return status;
}
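
/*
 * A minimal usage sketch (illustration only): enabling RTH with TCP/IPv4
 * hashing.  RTH_ALG_JENKINS and the hash-type field are expected to come
 * from vxge-config.h; the bucket size of 8 is an arbitrary example value.
 *
 *      struct vxge_hw_rth_hash_types hash_types = {0};
 *
 *      hash_types.hash_type_tcpipv4_en = 1;
 *      status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
 *                                         &hash_types, 8);
 */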

static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
                                u16 flag, u8 *itable)
{
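        /*
         * Each 64-bit RTS data word carries two indirection-table entries:
         * flags 1 and 2 select the ITEM0/ITEM1 slots of data0, flags 3 and
         * 4 the ITEM0/ITEM1 slots of data1.  Note that the cases below
         * fall through.
         */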
        switch (flag) {
        case 1:
                *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
                        itable[j]);
        case 2:
                *data0 |=
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
                        itable[j]);
        case 3:
                *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
                        itable[j]);
        case 4:
                *data1 |=
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
                        itable[j]);
        default:
                return;
        }
}

/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
                        struct __vxge_hw_vpath_handle **vpath_handles,
                        u32 vpath_count,
                        u8 *mtable,
                        u8 *itable,
                        u32 itable_size)
{
        u32 i, j, action, rts_table;
        u64 data0;
        u64 data1;
        u32 max_entries;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        max_entries = (((u32)1) << itable_size);
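        /* itable_size is a power-of-two exponent: e.g. 8 gives 256 entries */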

        if (vp->vpath->hldev->config.rth_it_type
                                == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
                action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
                rts_table =
                        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

                for (j = 0; j < max_entries; j++) {

                        data1 = 0;

                        data0 =
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                                itable[j]);

                        status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
                                action, rts_table, j, data0, data1);

                        if (status != VXGE_HW_OK)
                                goto exit;
                }

                for (j = 0; j < max_entries; j++) {

                        data1 = 0;

                        data0 =
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                                itable[j]);

                        status = __vxge_hw_vpath_rts_table_set(
                                vpath_handles[mtable[itable[j]]], action,
                                rts_table, j, data0, data1);

                        if (status != VXGE_HW_OK)
                                goto exit;
                }
        } else {
                action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
                rts_table =
                        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
                for (i = 0; i < vpath_count; i++) {

                        for (j = 0; j < max_entries;) {

                                data0 = 0;
                                data1 = 0;

                                while (j < max_entries) {
                                        if (mtable[itable[j]] != i) {
                                                j++;
                                                continue;
                                        }
                                        vxge_hw_rts_rth_data0_data1_get(j,
                                                &data0, &data1, 1, itable);
                                        j++;
                                        break;
                                }

                                while (j < max_entries) {
                                        if (mtable[itable[j]] != i) {
                                                j++;
                                                continue;
                                        }
                                        vxge_hw_rts_rth_data0_data1_get(j,
                                                &data0, &data1, 2, itable);
                                        j++;
                                        break;
                                }

                                while (j < max_entries) {
                                        if (mtable[itable[j]] != i) {
                                                j++;
                                                continue;
                                        }
                                        vxge_hw_rts_rth_data0_data1_get(j,
                                                &data0, &data1, 3, itable);
                                        j++;
                                        break;
                                }

                                while (j < max_entries) {
                                        if (mtable[itable[j]] != i) {
                                                j++;
                                                continue;
                                        }
                                        vxge_hw_rts_rth_data0_data1_get(j,
                                                &data0, &data1, 4, itable);
                                        j++;
                                        break;
                                }

                                if (data0 != 0) {
                                        status = __vxge_hw_vpath_rts_table_set(
                                                        vpath_handles[i],
                                                        action, rts_table,
                                                        0, data0, data1);

                                        if (status != VXGE_HW_OK)
                                                goto exit;
                                }
                        }
                }
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ring: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if a leak has occurred.
 *
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u64 rxd_new_count, rxd_spat;

        if (ring == NULL)
                return status;

        rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
        rxd_spat = readq(&ring->vp_reg->prc_cfg6);
        rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

        if (rxd_new_count >= rxd_spat)
                status = VXGE_HW_FAIL;

        return status;
}

/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
        struct __vxge_hw_device *hldev,
        struct __vxge_hw_virtualpath *vpath)
{
        u32 i, mtu = 0, max_pyld = 0;
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                val64 = readq(&vpath->vpmgmt_reg->
                                rxmac_cfg0_port_vpmgmt_clone[i]);
                max_pyld =
                        (u32)
                        VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
                        (val64);
                if (mtu < max_pyld)
                        mtu = max_pyld;
        }

        vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
4014
 
 
4015
 
        val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
4016
 
 
4017
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4018
 
                if (val64 & vxge_mBIT(i))
4019
 
                        vpath->vsport_number = i;
4020
 
        }
4021
 
 
4022
 
        val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
4023
 
 
4024
 
        if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
4025
 
                VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
4026
 
        else
4027
 
                VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4028
 
 
4029
 
        return status;
4030
 
}

/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter has completed the reset process for the vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status;

        status = __vxge_hw_device_register_poll(
                        &vpath->hldev->common_reg->vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
                                1 << (16 - vpath->vp_id)),
                        vpath->hldev->config.device_poll_millis);

        return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                                &hldev->common_reg->cmn_rsthdlr_cfg0);

        return status;
}

/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;

        vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];

        if (vpath->ringh) {
                status = __vxge_hw_ring_reset(vpath->ringh);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        if (vpath->fifoh)
                status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
        return status;
}

/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vp_config *vp_config;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;
        vp_config = vpath->vp_config;

        if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
                return;

        val64 = readq(&vp_reg->prc_cfg1);
        val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
        writeq(val64, &vp_reg->prc_cfg1);

        val64 = readq(&vpath->vp_reg->prc_cfg6);
        val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
        writeq(val64, &vpath->vp_reg->prc_cfg6);

        val64 = readq(&vp_reg->prc_cfg7);

        if (vpath->vp_config->ring.scatter_mode !=
                VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

                val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

                switch (vpath->vp_config->ring.scatter_mode) {
                case VXGE_HW_RING_SCATTER_MODE_A:
                        val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                                        VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
                        break;
                case VXGE_HW_RING_SCATTER_MODE_B:
                        val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                                        VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
                        break;
                case VXGE_HW_RING_SCATTER_MODE_C:
                        val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                                        VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
                        break;
                }
        }

        writeq(val64, &vp_reg->prc_cfg7);

        writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
                                __vxge_hw_ring_first_block_address_get(
                                        vpath->ringh) >> 3), &vp_reg->prc_cfg5);

        val64 = readq(&vp_reg->prc_cfg4);
        val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
        val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

        val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
                        VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

        if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
                val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
        else
                val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

        writeq(val64, &vp_reg->prc_cfg4);
}

/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        u64 vpath_stride;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;
        status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

        vpath->max_kdfc_db =
                (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
                        val64+1)/2;

        if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

                vpath->max_nofl_db = vpath->max_kdfc_db;

                if (vpath->max_nofl_db <
                        ((vpath->vp_config->fifo.memblock_size /
                        (vpath->vp_config->fifo.max_frags *
                        sizeof(struct vxge_hw_fifo_txd))) *
                        vpath->vp_config->fifo.fifo_blocks)) {

                        return VXGE_HW_BADCFG_FIFO_BLOCKS;
                }
                val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
                                (vpath->max_nofl_db*2)-1);
        }

        writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

        writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
                &vp_reg->kdfc_fifo_trpl_ctrl);

        val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

        val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
                   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

        val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
                 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
                 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
                 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

        writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
        writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
        wmb();
        vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

        vpath->nofl_db =
                (struct __vxge_hw_non_offload_db_wrapper __iomem *)
                (hldev->kdfc + (vp_id *
                VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
                                        vpath_stride)));
exit:
        return status;
}

/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vp_config *vp_config;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;
        vp_config = vpath->vp_config;

        writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
                        vpath->vsport_number), &vp_reg->xmac_vsport_choice);

        if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

                val64 = readq(&vp_reg->xmac_rpa_vcfg);

                if (vp_config->rpa_strip_vlan_tag !=
                        VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
                        if (vp_config->rpa_strip_vlan_tag)
                                val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
                        else
                                val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
                }

                writeq(val64, &vp_reg->xmac_rpa_vcfg);
                val64 = readq(&vp_reg->rxmac_vcfg0);

                if (vp_config->mtu !=
                                VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
                        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
                        if ((vp_config->mtu +
                                VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
                                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                                        vp_config->mtu +
                                        VXGE_HW_MAC_HEADER_MAX_SIZE);
                        else
                                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                                        vpath->max_mtu);
                }

                writeq(val64, &vp_reg->rxmac_vcfg0);

                val64 = readq(&vp_reg->rxmac_vcfg1);

                val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
                        VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

                if (hldev->config.rth_it_type ==
                                VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
                        val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
                                0x2) |
                                VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
                }

                writeq(val64, &vp_reg->rxmac_vcfg1);
        }
        return status;
}

/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of virtual path using the config
 * passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        struct vxge_hw_vp_config *config;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;
        config = vpath->vp_config;

        writeq(0, &vp_reg->tim_dest_addr);
        writeq(0, &vp_reg->tim_vpath_map);
        writeq(0, &vp_reg->tim_bitmap);
        writeq(0, &vp_reg->tim_remap);

        if (config->ring.enable == VXGE_HW_RING_ENABLE)
                writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
                        (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
                        VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

        val64 = readq(&vp_reg->tim_pci_cfg);
        val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
        writeq(val64, &vp_reg->tim_pci_cfg);

        if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

                val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

                if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                                0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                                        config->tti.btimer_val);
                }

                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

                if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->tti.timer_ac_en)
                                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
                }

                if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->tti.timer_ci_en)
                                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
                }

                if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                                        config->tti.urange_a);
                }

                if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                                        config->tti.urange_b);
                }

                if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                                        config->tti.urange_c);
                }

                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
                vpath->tim_tti_cfg1_saved = val64;

                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

                if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                                                config->tti.uec_a);
                }

                if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                                                config->tti.uec_b);
                }

                if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                                                config->tti.uec_c);
                }

                if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                                                config->tti.uec_d);
                }

                writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
                val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

                if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->tti.timer_ri_en)
                                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
                }

                if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                                        0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                                        config->tti.rtimer_val);
                }

                if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
                }

                if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                                        0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                                        config->tti.ltimer_val);
                }

                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
                vpath->tim_tti_cfg3_saved = val64;
        }

        if (config->ring.enable == VXGE_HW_RING_ENABLE) {

                val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

                if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                                        0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                                        config->rti.btimer_val);
                }

                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

                if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->rti.timer_ac_en)
                                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
                }

                if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->rti.timer_ci_en)
                                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
                }

                if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                                        config->rti.urange_a);
                }

                if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                                        config->rti.urange_b);
                }

                if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
                        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                                        config->rti.urange_c);
                }

                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
                vpath->tim_rti_cfg1_saved = val64;

                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

                if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                                                config->rti.uec_a);
                }

                if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                                                config->rti.uec_b);
                }

                if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                                                config->rti.uec_c);
                }

                if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
                        val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                                                config->rti.uec_d);
                }

                writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
                val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

                if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
                        if (config->rti.timer_ri_en)
                                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
                        else
                                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
                }

                if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                                        0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                                        config->rti.rtimer_val);
                }

                if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
                }

                if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
                        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                                        0x3ffffff);
                        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                                        config->rti.ltimer_val);
                }

                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
                vpath->tim_rti_cfg3_saved = val64;
        }

        val64 = 0;
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

        val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
        val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
        val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
        writeq(val64, &vp_reg->tim_wrkld_clc);

        return status;
}

/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        u32 val32;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vpath = &hldev->virtual_paths[vp_id];

        if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
                status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

        /* Get MRRS value from device control */
        status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
        if (status == VXGE_HW_OK) {
                val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
                val64 &=
                    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
                val64 |=
                    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

                val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
        }

        val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
        val64 |=
            VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
                    VXGE_HW_MAX_PAYLOAD_SIZE_512);

        val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
        writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
        return status;
}

/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
 */
static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
        struct __vxge_hw_virtualpath *vpath;

        vpath = &hldev->virtual_paths[vp_id];

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
                goto exit;

        VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
                vpath->hldev->tim_int_mask1, vpath->vp_id);
        hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

        /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
         * work after the interface is brought down.
         */
        spin_lock(&vpath->lock);
        vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
        spin_unlock(&vpath->lock);

        vpath->vpmgmt_reg = NULL;
        vpath->nofl_db = NULL;
        vpath->max_mtu = 0;
        vpath->vsport_number = 0;
        vpath->max_kdfc_db = 0;
        vpath->max_nofl_db = 0;
        vpath->ringh = NULL;
        vpath->fifoh = NULL;
        memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
        vpath->stats_block = NULL;
        vpath->hw_stats = NULL;
        vpath->hw_stats_sav = NULL;
        vpath->sw_stats = NULL;

exit:
        return;
}

/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                        struct vxge_hw_vp_config *config)
{
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
                status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
                goto exit;
        }

        vpath = &hldev->virtual_paths[vp_id];

        spin_lock_init(&vpath->lock);
        vpath->vp_id = vp_id;
        vpath->vp_open = VXGE_HW_VP_OPEN;
        vpath->hldev = hldev;
        vpath->vp_config = config;
        vpath->vp_reg = hldev->vpath_reg[vp_id];
        vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

        __vxge_hw_vpath_reset(hldev, vp_id);

        status = __vxge_hw_vpath_reset_check(vpath);
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
        }

        status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
        }

        INIT_LIST_HEAD(&vpath->vpath_handles);

        vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

        VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
                hldev->tim_int_mask1, vp_id);

        status = __vxge_hw_vpath_initialize(hldev, vp_id);
        if (status != VXGE_HW_OK)
                __vxge_hw_vp_terminate(hldev, vp_id);
exit:
        return status;
}

/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. For example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
        vpath = vp->vpath;

        new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

        if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
                status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                goto exit;
        }

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

        writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

        vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
        return status;
}
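
/* Illustrative usage sketch (not part of the driver): the return status
 * should be checked, since an out-of-range MTU is rejected. "my_vp_handle"
 * is a hypothetical handle obtained from vxge_hw_vpath_open().
 *
 *        if (vxge_hw_vpath_mtu_set(my_vp_handle, 9600) != VXGE_HW_OK)
 *                pr_warn("vxge: MTU change rejected\n");
 */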

/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. This function is called to re-enable
 * the adapter to update stats into the host memory
 */
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        memcpy(vpath->hw_stats_sav, vpath->hw_stats,
                        sizeof(struct vxge_hw_vpath_stats_hw_info));

        status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
        return status;
}

/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
        struct __vxge_hw_blockpool_entry *entry = NULL;
        struct __vxge_hw_blockpool  *blockpool;

        blockpool = &devh->block_pool;

        if (size == blockpool->block_size) {

                if (!list_empty(&blockpool->free_block_list))
                        entry = (struct __vxge_hw_blockpool_entry *)
                                list_first_entry(&blockpool->free_block_list,
                                        struct __vxge_hw_blockpool_entry,
                                        item);

                if (entry != NULL) {
                        list_del(&entry->item);
                        blockpool->pool_size--;
                }
        }

        if (entry != NULL)
                __vxge_hw_blockpool_blocks_add(blockpool);

        return entry;
}

/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
                   struct vxge_hw_vpath_attr *attr,
                   struct __vxge_hw_vpath_handle **vpath_handle)
{
        struct __vxge_hw_virtualpath *vpath;
        struct __vxge_hw_vpath_handle *vp;
        enum vxge_hw_status status;

        vpath = &hldev->virtual_paths[attr->vp_id];

        if (vpath->vp_open == VXGE_HW_VP_OPEN) {
                status = VXGE_HW_ERR_INVALID_STATE;
                goto vpath_open_exit1;
        }

        status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
                        &hldev->config.vp_config[attr->vp_id]);
        if (status != VXGE_HW_OK)
                goto vpath_open_exit1;

        vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
        if (vp == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto vpath_open_exit2;
        }

        vp->vpath = vpath;

        if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
                status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
                if (status != VXGE_HW_OK)
                        goto vpath_open_exit6;
        }

        if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
                status = __vxge_hw_ring_create(vp, &attr->ring_attr);
                if (status != VXGE_HW_OK)
                        goto vpath_open_exit7;

                __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
        }

        vpath->fifoh->tx_intr_num =
                (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
                        VXGE_HW_VPATH_INTR_TX;

        vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
                                VXGE_HW_BLOCK_SIZE);
        if (vpath->stats_block == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto vpath_open_exit8;
        }

        vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
                        stats_block->memblock;
        memset(vpath->hw_stats, 0,
                sizeof(struct vxge_hw_vpath_stats_hw_info));

        hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
                                                vpath->hw_stats;

        vpath->hw_stats_sav =
                &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
        memset(vpath->hw_stats_sav, 0,
                        sizeof(struct vxge_hw_vpath_stats_hw_info));

        writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

        status = vxge_hw_vpath_stats_enable(vp);
        if (status != VXGE_HW_OK)
                goto vpath_open_exit8;

        list_add(&vp->item, &vpath->vpath_handles);

        hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

        *vpath_handle = vp;

        attr->fifo_attr.userdata = vpath->fifoh;
        attr->ring_attr.userdata = vpath->ringh;

        return VXGE_HW_OK;

vpath_open_exit8:
        if (vpath->ringh != NULL)
                __vxge_hw_ring_delete(vp);
vpath_open_exit7:
        if (vpath->fifoh != NULL)
                __vxge_hw_fifo_delete(vp);
vpath_open_exit6:
        vfree(vp);
vpath_open_exit2:
        __vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:

        return status;
}
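
/* Illustrative usage sketch (not part of the driver): a minimal open
 * sequence, assuming the caller has filled "attr" with the desired vp_id
 * and fifo/ring attributes. "my_hldev" and "my_vp" are hypothetical
 * caller-side names.
 *
 *        struct __vxge_hw_vpath_handle *my_vp;
 *        enum vxge_hw_status status;
 *
 *        status = vxge_hw_vpath_open(my_hldev, &attr, &my_vp);
 *        if (status != VXGE_HW_OK)
 *                return status;
 *        vxge_hw_vpath_rx_doorbell_init(my_vp);
 */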

/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
 * @vp: Handle got from previous vpath open
 *
 * This function posts the initial receive descriptor count to the
 * PRC_RXD_DOORBELL register and derives the ring's rxds_limit from
 * the RXD_SPAT threshold.
 */
void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_virtualpath *vpath = vp->vpath;
        struct __vxge_hw_ring *ring = vpath->ringh;
        struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
        u64 new_count, val64, val164;

        if (vdev->titan1) {
                new_count = readq(&vpath->vp_reg->rxdmem_size);
                new_count &= 0x1fff;
        } else
                new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;

        val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);

        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
                &vpath->vp_reg->prc_rxd_doorbell);
        readl(&vpath->vp_reg->prc_rxd_doorbell);

        val164 /= 2;
        val64 = readq(&vpath->vp_reg->prc_cfg6);
        val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
        val64 &= 0x1ff;

        /*
         * Each RxD is of 4 qwords
         */
        new_count -= (val64 + 1);
        val64 = min(val164, new_count) / 4;

        ring->rxds_limit = min(ring->rxds_limit, val64);
        if (ring->rxds_limit < 4)
                ring->rxds_limit = 4;
}

/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
                               struct __vxge_hw_blockpool_entry *entry)
{
        struct __vxge_hw_blockpool  *blockpool;

        blockpool = &devh->block_pool;

        if (entry->length == blockpool->block_size) {
                list_add(&entry->item, &blockpool->free_block_list);
                blockpool->pool_size++;
        }

        __vxge_hw_blockpool_blocks_remove(blockpool);
}

/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_virtualpath *vpath = NULL;
        struct __vxge_hw_device *devh = NULL;
        u32 vp_id = vp->vpath->vp_id;
        u32 is_empty = TRUE;
        enum vxge_hw_status status = VXGE_HW_OK;

        vpath = vp->vpath;
        devh = vpath->hldev;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto vpath_close_exit;
        }

        list_del(&vp->item);

        if (!list_empty(&vpath->vpath_handles)) {
                list_add(&vp->item, &vpath->vpath_handles);
                is_empty = FALSE;
        }

        if (!is_empty) {
                status = VXGE_HW_FAIL;
                goto vpath_close_exit;
        }

        devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

        if (vpath->ringh != NULL)
                __vxge_hw_ring_delete(vp);

        if (vpath->fifoh != NULL)
                __vxge_hw_fifo_delete(vp);

        if (vpath->stats_block != NULL)
                __vxge_hw_blockpool_block_free(devh, vpath->stats_block);

        vfree(vp);

        __vxge_hw_vp_terminate(devh, vp_id);

vpath_close_exit:
        return status;
}
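
/* Illustrative teardown sketch (not part of the driver): a close succeeds
 * only for the last handle still open on the vpath, at which point the
 * ring, fifo and stats block are torn down. "my_vp" is a hypothetical
 * handle from vxge_hw_vpath_open().
 *
 *        if (vxge_hw_vpath_close(my_vp) != VXGE_HW_OK)
 *                pr_warn("vxge: vpath close failed\n");
 */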

/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
        enum vxge_hw_status status;
        u32 vp_id;
        struct __vxge_hw_virtualpath *vpath = vp->vpath;

        vp_id = vpath->vp_id;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
        if (status == VXGE_HW_OK)
                vpath->sw_stats->soft_reset_cnt++;
exit:
        return status;
}

/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_virtualpath *vpath = NULL;
        enum vxge_hw_status status;
        struct __vxge_hw_device *hldev;
        u32 vp_id;

        vp_id = vp->vpath->vp_id;
        vpath = vp->vpath;
        hldev = vpath->hldev;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        status = __vxge_hw_vpath_reset_check(vpath);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_initialize(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        if (vpath->ringh != NULL)
                __vxge_hw_vpath_prc_configure(hldev, vp_id);

        memset(vpath->hw_stats, 0,
                sizeof(struct vxge_hw_vpath_stats_hw_info));

        memset(vpath->hw_stats_sav, 0,
                sizeof(struct vxge_hw_vpath_stats_hw_info));

        writeq(vpath->stats_block->dma_addr,
                &vpath->vp_reg->stats_cfg);

        status = vxge_hw_vpath_stats_enable(vp);

exit:
        return status;
}
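
/* Illustrative recovery sketch (not part of the driver): a typical sequence
 * requests the reset, polls for completion and re-initializes, then clears
 * the reset so the vpath resumes. "my_vp" is a hypothetical handle from
 * vxge_hw_vpath_open().
 *
 *        status = vxge_hw_vpath_reset(my_vp);
 *        if (status == VXGE_HW_OK)
 *                status = vxge_hw_vpath_recover_from_reset(my_vp);
 *        if (status == VXGE_HW_OK)
 *                vxge_hw_vpath_enable(my_vp);
 */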

/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_device *hldev;
        u64 val64;

        hldev = vp->vpath->hldev;

        val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
                1 << (16 - vp->vpath->vp_id));

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                &hldev->common_reg->cmn_rsthdlr_cfg1);
}