/*
 * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * Based in part upon the original driver by Mellanox Technologies
 * Ltd.  Portions may be Copyright (c) Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */
27
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
39
#include <ipxe/pcibackup.h>
40
#include <ipxe/malloc.h>
41
#include <ipxe/umalloc.h>
42
#include <ipxe/iobuf.h>
43
#include <ipxe/netdevice.h>
44
#include <ipxe/infiniband.h>
45
#include <ipxe/ib_smc.h>
/**
 * @file
 *
 * Mellanox Arbel Infiniband HCA driver
 *
 */
55
/***************************************************************************
57
* Queue number allocation
59
***************************************************************************
63
* Allocate offset within usage bitmask
65
* @v bits Usage bitmask
66
* @v bits_len Length of usage bitmask
67
* @ret bit First free bit within bitmask, or negative error
69
static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
70
unsigned int bits_len ) {
72
arbel_bitmask_t mask = 1;
74
while ( bit < bits_len ) {
75
if ( ( mask & *bits ) == 0 ) {
80
mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
88
* Free offset within usage bitmask
90
* @v bits Usage bitmask
91
* @v bit Bit within bitmask
93
static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
96
mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
97
bits += ( bit / ( 8 * sizeof ( mask ) ) );
101
/***************************************************************************
105
***************************************************************************
109
* Wait for Arbel command completion
111
* @v arbel Arbel device
112
* @ret rc Return status code
114
static int arbel_cmd_wait ( struct arbel *arbel,
115
struct arbelprm_hca_command_register *hcr ) {
118
for ( wait = ARBEL_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
120
readl ( arbel->config + ARBEL_HCR_REG ( 6 ) );
121
if ( MLX_GET ( hcr, go ) == 0 )
131
* @v arbel Arbel device
132
* @v command Command opcode, flags and input/output lengths
133
* @v op_mod Opcode modifier (0 if no modifier applicable)
134
* @v in Input parameters
135
* @v in_mod Input modifier (0 if no modifier applicable)
136
* @v out Output parameters
137
* @ret rc Return status code
139
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
140
unsigned int op_mod, const void *in,
141
unsigned int in_mod, void *out ) {
142
struct arbelprm_hca_command_register hcr;
143
unsigned int opcode = ARBEL_HCR_OPCODE ( command );
144
size_t in_len = ARBEL_HCR_IN_LEN ( command );
145
size_t out_len = ARBEL_HCR_OUT_LEN ( command );
152
assert ( in_len <= ARBEL_MBOX_SIZE );
153
assert ( out_len <= ARBEL_MBOX_SIZE );
155
DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n",
156
arbel, opcode, in_len,
157
( ( command & ARBEL_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
158
( ( command & ARBEL_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );
160
/* Check that HCR is free */
161
if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
162
DBGC ( arbel, "Arbel %p command interface locked\n", arbel );
167
memset ( &hcr, 0, sizeof ( hcr ) );
168
in_buffer = &hcr.u.dwords[0];
169
if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
170
in_buffer = arbel->mailbox_in;
171
MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
172
MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
174
memcpy ( in_buffer, in, in_len );
175
MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
176
out_buffer = &hcr.u.dwords[3];
177
if ( out_len && ( command & ARBEL_HCR_OUT_MBOX ) ) {
178
out_buffer = arbel->mailbox_out;
179
MLX_FILL_H ( &hcr, 3, out_param_h,
180
virt_to_bus ( out_buffer ) );
181
MLX_FILL_1 ( &hcr, 4, out_param_l,
182
virt_to_bus ( out_buffer ) );
184
MLX_FILL_3 ( &hcr, 6,
186
opcode_modifier, op_mod,
188
DBGC ( arbel, "Arbel %p issuing command %04x\n", arbel, opcode );
189
DBGC2_HDA ( arbel, virt_to_phys ( arbel->config + ARBEL_HCR_BASE ),
190
&hcr, sizeof ( hcr ) );
191
if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
192
DBGC2 ( arbel, "Input mailbox:\n" );
193
DBGC2_HDA ( arbel, virt_to_phys ( in_buffer ), in_buffer,
194
( ( in_len < 512 ) ? in_len : 512 ) );
198
for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
200
writel ( hcr.u.dwords[i],
201
arbel->config + ARBEL_HCR_REG ( i ) );
205
/* Wait for command completion */
206
if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
207
DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
209
DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
213
/* Check command status */
214
status = MLX_GET ( &hcr, status );
216
DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
218
DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
222
/* Read output parameters, if any */
223
hcr.u.dwords[3] = readl ( arbel->config + ARBEL_HCR_REG ( 3 ) );
224
hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
225
memcpy ( out, out_buffer, out_len );
227
DBGC2 ( arbel, "Output%s:\n",
228
( command & ARBEL_HCR_OUT_MBOX ) ? " mailbox" : "" );
229
DBGC2_HDA ( arbel, virt_to_phys ( out_buffer ), out_buffer,
230
( ( out_len < 512 ) ? out_len : 512 ) );
237
arbel_cmd_query_dev_lim ( struct arbel *arbel,
238
struct arbelprm_query_dev_lim *dev_lim ) {
239
return arbel_cmd ( arbel,
240
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
241
1, sizeof ( *dev_lim ) ),
242
0, NULL, 0, dev_lim );
246
arbel_cmd_query_fw ( struct arbel *arbel, struct arbelprm_query_fw *fw ) {
247
return arbel_cmd ( arbel,
248
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_FW,
254
arbel_cmd_init_hca ( struct arbel *arbel,
255
const struct arbelprm_init_hca *init_hca ) {
256
return arbel_cmd ( arbel,
257
ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
258
1, sizeof ( *init_hca ) ),
259
0, init_hca, 0, NULL );
263
arbel_cmd_close_hca ( struct arbel *arbel ) {
264
return arbel_cmd ( arbel,
265
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_HCA ),
270
arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
271
const struct arbelprm_init_ib *init_ib ) {
272
return arbel_cmd ( arbel,
273
ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
274
1, sizeof ( *init_ib ) ),
275
0, init_ib, port, NULL );
279
arbel_cmd_close_ib ( struct arbel *arbel, unsigned int port ) {
280
return arbel_cmd ( arbel,
281
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_IB ),
282
0, NULL, port, NULL );
286
arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
287
const struct arbelprm_mpt *mpt ) {
288
return arbel_cmd ( arbel,
289
ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
290
1, sizeof ( *mpt ) ),
291
0, mpt, index, NULL );
295
arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
296
const struct arbelprm_event_mask *mask ) {
297
return arbel_cmd ( arbel,
298
ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
299
0, sizeof ( *mask ) ),
300
0, mask, index_map, NULL );
304
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
305
const struct arbelprm_eqc *eqctx ) {
306
return arbel_cmd ( arbel,
307
ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
308
1, sizeof ( *eqctx ) ),
309
0, eqctx, index, NULL );
313
arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
314
struct arbelprm_eqc *eqctx ) {
315
return arbel_cmd ( arbel,
316
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
317
1, sizeof ( *eqctx ) ),
318
1, NULL, index, eqctx );
322
arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
323
const struct arbelprm_completion_queue_context *cqctx ) {
324
return arbel_cmd ( arbel,
325
ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
326
1, sizeof ( *cqctx ) ),
327
0, cqctx, cqn, NULL );
331
arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
332
struct arbelprm_completion_queue_context *cqctx) {
333
return arbel_cmd ( arbel,
334
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
335
1, sizeof ( *cqctx ) ),
336
0, NULL, cqn, cqctx );
340
arbel_cmd_query_cq ( struct arbel *arbel, unsigned long cqn,
341
struct arbelprm_completion_queue_context *cqctx ) {
342
return arbel_cmd ( arbel,
343
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_CQ,
344
1, sizeof ( *cqctx ) ),
345
0, NULL, cqn, cqctx );
349
arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
350
const struct arbelprm_qp_ee_state_transitions *ctx ){
351
return arbel_cmd ( arbel,
352
ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
353
1, sizeof ( *ctx ) ),
358
arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
359
const struct arbelprm_qp_ee_state_transitions *ctx ){
360
return arbel_cmd ( arbel,
361
ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
362
1, sizeof ( *ctx ) ),
367
arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
368
const struct arbelprm_qp_ee_state_transitions *ctx ) {
369
return arbel_cmd ( arbel,
370
ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
371
1, sizeof ( *ctx ) ),
376
arbel_cmd_rts2rts_qpee ( struct arbel *arbel, unsigned long qpn,
377
const struct arbelprm_qp_ee_state_transitions *ctx ) {
378
return arbel_cmd ( arbel,
379
ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
380
1, sizeof ( *ctx ) ),
385
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
386
return arbel_cmd ( arbel,
387
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
388
0x03, NULL, qpn, NULL );
392
arbel_cmd_query_qpee ( struct arbel *arbel, unsigned long qpn,
393
struct arbelprm_qp_ee_state_transitions *ctx ) {
394
return arbel_cmd ( arbel,
395
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_QPEE,
396
1, sizeof ( *ctx ) ),
401
arbel_cmd_conf_special_qp ( struct arbel *arbel, unsigned int qp_type,
402
unsigned long base_qpn ) {
403
return arbel_cmd ( arbel,
404
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
405
qp_type, NULL, base_qpn, NULL );
409
arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
410
union arbelprm_mad *mad ) {
411
return arbel_cmd ( arbel,
412
ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
414
1, sizeof ( *mad ) ),
415
0x03, mad, port, mad );
419
arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
420
struct arbelprm_mgm_entry *mgm ) {
421
return arbel_cmd ( arbel,
422
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
423
1, sizeof ( *mgm ) ),
424
0, NULL, index, mgm );
428
arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
429
const struct arbelprm_mgm_entry *mgm ) {
430
return arbel_cmd ( arbel,
431
ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
432
1, sizeof ( *mgm ) ),
433
0, mgm, index, NULL );
437
arbel_cmd_mgid_hash ( struct arbel *arbel, const union ib_gid *gid,
438
struct arbelprm_mgm_hash *hash ) {
439
return arbel_cmd ( arbel,
440
ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
442
0, sizeof ( *hash ) ),
447
arbel_cmd_run_fw ( struct arbel *arbel ) {
448
return arbel_cmd ( arbel,
449
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_RUN_FW ),
454
arbel_cmd_disable_lam ( struct arbel *arbel ) {
455
return arbel_cmd ( arbel,
456
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_DISABLE_LAM ),
461
arbel_cmd_enable_lam ( struct arbel *arbel, struct arbelprm_access_lam *lam ) {
462
return arbel_cmd ( arbel,
463
ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
464
1, sizeof ( *lam ) ),
469
arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count,
470
const struct arbelprm_scalar_parameter *offset ) {
471
return arbel_cmd ( arbel,
472
ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM, 0,
473
sizeof ( *offset ) ),
474
0, offset, page_count, NULL );
478
arbel_cmd_map_icm ( struct arbel *arbel,
479
const struct arbelprm_virtual_physical_mapping *map ) {
480
return arbel_cmd ( arbel,
481
ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
482
1, sizeof ( *map ) ),
487
arbel_cmd_unmap_icm_aux ( struct arbel *arbel ) {
488
return arbel_cmd ( arbel,
489
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM_AUX ),
494
arbel_cmd_map_icm_aux ( struct arbel *arbel,
495
const struct arbelprm_virtual_physical_mapping *map ) {
496
return arbel_cmd ( arbel,
497
ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
498
1, sizeof ( *map ) ),
503
arbel_cmd_set_icm_size ( struct arbel *arbel,
504
const struct arbelprm_scalar_parameter *icm_size,
505
struct arbelprm_scalar_parameter *icm_aux_size ) {
506
return arbel_cmd ( arbel,
507
ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
508
0, sizeof ( *icm_size ),
509
0, sizeof ( *icm_aux_size ) ),
510
0, icm_size, 0, icm_aux_size );
514
arbel_cmd_unmap_fa ( struct arbel *arbel ) {
515
return arbel_cmd ( arbel,
516
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_FA ),
521
arbel_cmd_map_fa ( struct arbel *arbel,
522
const struct arbelprm_virtual_physical_mapping *map ) {
523
return arbel_cmd ( arbel,
524
ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
525
1, sizeof ( *map ) ),
529
/***************************************************************************
533
***************************************************************************
537
* Issue management datagram
539
* @v ibdev Infiniband device
540
* @v mad Management datagram
541
* @ret rc Return status code
543
static int arbel_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
544
struct arbel *arbel = ib_get_drvdata ( ibdev );
545
union arbelprm_mad mad_ifc;
548
linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
551
/* Copy in request packet */
552
memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );
555
if ( ( rc = arbel_cmd_mad_ifc ( arbel, ibdev->port,
556
&mad_ifc ) ) != 0 ) {
557
DBGC ( arbel, "Arbel %p port %d could not issue MAD IFC: %s\n",
558
arbel, ibdev->port, strerror ( rc ) );
562
/* Copy out reply packet */
563
memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );
565
if ( mad->hdr.status != 0 ) {
566
DBGC ( arbel, "Arbel %p port %d MAD IFC status %04x\n",
567
arbel, ibdev->port, ntohs ( mad->hdr.status ) );
573
/***************************************************************************
575
* Completion queue operations
577
***************************************************************************
581
* Dump completion queue context (for debugging only)
583
* @v arbel Arbel device
584
* @v cq Completion queue
585
* @ret rc Return status code
587
static __attribute__ (( unused )) int
588
arbel_dump_cqctx ( struct arbel *arbel, struct ib_completion_queue *cq ) {
589
struct arbelprm_completion_queue_context cqctx;
592
memset ( &cqctx, 0, sizeof ( cqctx ) );
593
if ( ( rc = arbel_cmd_query_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
594
DBGC ( arbel, "Arbel %p CQN %#lx QUERY_CQ failed: %s\n",
595
arbel, cq->cqn, strerror ( rc ) );
598
DBGC ( arbel, "Arbel %p CQN %#lx context:\n", arbel, cq->cqn );
599
DBGC_HDA ( arbel, 0, &cqctx, sizeof ( cqctx ) );
605
* Create completion queue
607
* @v ibdev Infiniband device
608
* @v cq Completion queue
609
* @ret rc Return status code
611
static int arbel_create_cq ( struct ib_device *ibdev,
612
struct ib_completion_queue *cq ) {
613
struct arbel *arbel = ib_get_drvdata ( ibdev );
614
struct arbel_completion_queue *arbel_cq;
615
struct arbelprm_completion_queue_context cqctx;
616
struct arbelprm_cq_ci_db_record *ci_db_rec;
617
struct arbelprm_cq_arm_db_record *arm_db_rec;
622
/* Find a free completion queue number */
623
cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
624
if ( cqn_offset < 0 ) {
625
DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
629
cq->cqn = ( arbel->limits.reserved_cqs + cqn_offset );
631
/* Allocate control structures */
632
arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
637
arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( arbel, cq );
638
arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( arbel, cq );
640
/* Allocate completion queue itself */
641
arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
642
arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
643
sizeof ( arbel_cq->cqe[0] ) );
644
if ( ! arbel_cq->cqe ) {
648
memset ( arbel_cq->cqe, 0, arbel_cq->cqe_size );
649
for ( i = 0 ; i < cq->num_cqes ; i++ ) {
650
MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
654
/* Initialise doorbell records */
655
ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
656
MLX_FILL_1 ( ci_db_rec, 0, counter, 0 );
657
MLX_FILL_2 ( ci_db_rec, 1,
658
res, ARBEL_UAR_RES_CQ_CI,
659
cq_number, cq->cqn );
660
arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
661
MLX_FILL_1 ( arm_db_rec, 0, counter, 0 );
662
MLX_FILL_2 ( arm_db_rec, 1,
663
res, ARBEL_UAR_RES_CQ_ARM,
664
cq_number, cq->cqn );
666
/* Hand queue over to hardware */
667
memset ( &cqctx, 0, sizeof ( cqctx ) );
668
MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
669
MLX_FILL_H ( &cqctx, 1, start_address_h,
670
virt_to_bus ( arbel_cq->cqe ) );
671
MLX_FILL_1 ( &cqctx, 2, start_address_l,
672
virt_to_bus ( arbel_cq->cqe ) );
673
MLX_FILL_2 ( &cqctx, 3,
674
usr_page, arbel->limits.reserved_uars,
675
log_cq_size, fls ( cq->num_cqes - 1 ) );
676
MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eq.eqn );
677
MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
678
MLX_FILL_1 ( &cqctx, 7, l_key, arbel->lkey );
679
MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
680
MLX_FILL_1 ( &cqctx, 13,
681
cq_ci_db_record, arbel_cq->ci_doorbell_idx );
682
MLX_FILL_1 ( &cqctx, 14,
683
cq_state_db_record, arbel_cq->arm_doorbell_idx );
684
if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
685
DBGC ( arbel, "Arbel %p CQN %#lx SW2HW_CQ failed: %s\n",
686
arbel, cq->cqn, strerror ( rc ) );
690
DBGC ( arbel, "Arbel %p CQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
691
arbel, cq->cqn, virt_to_phys ( arbel_cq->cqe ),
692
( virt_to_phys ( arbel_cq->cqe ) + arbel_cq->cqe_size ),
693
virt_to_phys ( ci_db_rec ) );
694
ib_cq_set_drvdata ( cq, arbel_cq );
698
MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
699
MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
700
free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
704
arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
710
* Destroy completion queue
712
* @v ibdev Infiniband device
713
* @v cq Completion queue
715
static void arbel_destroy_cq ( struct ib_device *ibdev,
716
struct ib_completion_queue *cq ) {
717
struct arbel *arbel = ib_get_drvdata ( ibdev );
718
struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
719
struct arbelprm_completion_queue_context cqctx;
720
struct arbelprm_cq_ci_db_record *ci_db_rec;
721
struct arbelprm_cq_arm_db_record *arm_db_rec;
725
/* Take ownership back from hardware */
726
if ( ( rc = arbel_cmd_hw2sw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
727
DBGC ( arbel, "Arbel %p CQN %#lx FATAL HW2SW_CQ failed: "
728
"%s\n", arbel, cq->cqn, strerror ( rc ) );
729
/* Leak memory and return; at least we avoid corruption */
733
/* Clear doorbell records */
734
ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
735
arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
736
MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
737
MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
740
free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
743
/* Mark queue number as free */
744
cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
745
arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
747
ib_cq_set_drvdata ( cq, NULL );
750
/***************************************************************************
752
* Queue pair operations
754
***************************************************************************
758
* Assign queue pair number
760
* @v ibdev Infiniband device
762
* @ret rc Return status code
764
static int arbel_alloc_qpn ( struct ib_device *ibdev,
765
struct ib_queue_pair *qp ) {
766
struct arbel *arbel = ib_get_drvdata ( ibdev );
767
unsigned int port_offset;
770
/* Calculate queue pair number */
771
port_offset = ( ibdev->port - ARBEL_PORT_BASE );
773
switch ( qp->type ) {
775
qp->qpn = ( arbel->special_qpn_base + port_offset );
778
qp->qpn = ( arbel->special_qpn_base + 2 + port_offset );
782
/* Find a free queue pair number */
783
qpn_offset = arbel_bitmask_alloc ( arbel->qp_inuse,
785
if ( qpn_offset < 0 ) {
786
DBGC ( arbel, "Arbel %p out of queue pairs\n",
790
qp->qpn = ( ( random() & ARBEL_QPN_RANDOM_MASK ) |
791
( arbel->qpn_base + qpn_offset ) );
794
DBGC ( arbel, "Arbel %p unsupported QP type %d\n",
801
* Free queue pair number
803
* @v ibdev Infiniband device
806
static void arbel_free_qpn ( struct ib_device *ibdev,
807
struct ib_queue_pair *qp ) {
808
struct arbel *arbel = ib_get_drvdata ( ibdev );
811
qpn_offset = ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) - arbel->qpn_base );
812
if ( qpn_offset >= 0 )
813
arbel_bitmask_free ( arbel->qp_inuse, qpn_offset );
817
* Calculate transmission rate
819
* @v av Address vector
820
* @ret arbel_rate Arbel rate
822
static unsigned int arbel_rate ( struct ib_address_vector *av ) {
823
return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
824
? ( av->rate + 5 ) : 0 );
827
/** Queue pair transport service type map */
828
static uint8_t arbel_qp_st[] = {
829
[IB_QPT_SMI] = ARBEL_ST_MLX,
830
[IB_QPT_GSI] = ARBEL_ST_MLX,
831
[IB_QPT_UD] = ARBEL_ST_UD,
832
[IB_QPT_RC] = ARBEL_ST_RC,
836
* Dump queue pair context (for debugging only)
838
* @v arbel Arbel device
840
* @ret rc Return status code
842
static __attribute__ (( unused )) int
843
arbel_dump_qpctx ( struct arbel *arbel, struct ib_queue_pair *qp ) {
844
struct arbelprm_qp_ee_state_transitions qpctx;
847
memset ( &qpctx, 0, sizeof ( qpctx ) );
848
if ( ( rc = arbel_cmd_query_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
849
DBGC ( arbel, "Arbel %p QPN %#lx QUERY_QPEE failed: %s\n",
850
arbel, qp->qpn, strerror ( rc ) );
853
DBGC ( arbel, "Arbel %p QPN %#lx context:\n", arbel, qp->qpn );
854
DBGC_HDA ( arbel, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );
860
* Create send work queue
862
* @v arbel_send_wq Send work queue
863
* @v num_wqes Number of work queue entries
864
* @ret rc Return status code
866
static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
867
unsigned int num_wqes ) {
868
union arbel_send_wqe *wqe;
869
union arbel_send_wqe *next_wqe;
870
unsigned int wqe_idx_mask;
873
/* Allocate work queue */
874
arbel_send_wq->wqe_size = ( num_wqes *
875
sizeof ( arbel_send_wq->wqe[0] ) );
876
arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
877
sizeof ( arbel_send_wq->wqe[0] ) );
878
if ( ! arbel_send_wq->wqe )
880
memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
882
/* Link work queue entries */
883
wqe_idx_mask = ( num_wqes - 1 );
884
for ( i = 0 ; i < num_wqes ; i++ ) {
885
wqe = &arbel_send_wq->wqe[i];
886
next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ];
887
MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
888
( virt_to_bus ( next_wqe ) >> 6 ) );
889
MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
896
* Create receive work queue
898
* @v arbel_recv_wq Receive work queue
899
* @v num_wqes Number of work queue entries
900
* @v type Queue pair type
901
* @ret rc Return status code
903
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
904
unsigned int num_wqes,
905
enum ib_queue_pair_type type ) {
906
struct arbelprm_recv_wqe *wqe;
907
struct arbelprm_recv_wqe *next_wqe;
908
unsigned int wqe_idx_mask;
914
/* Allocate work queue */
915
arbel_recv_wq->wqe_size = ( num_wqes *
916
sizeof ( arbel_recv_wq->wqe[0] ) );
917
arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
918
sizeof ( arbel_recv_wq->wqe[0] ) );
919
if ( ! arbel_recv_wq->wqe ) {
923
memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
925
/* Allocate GRH entries, if needed */
926
if ( ( type == IB_QPT_SMI ) || ( type == IB_QPT_GSI ) ||
927
( type == IB_QPT_UD ) ) {
928
arbel_recv_wq->grh_size = ( num_wqes *
929
sizeof ( arbel_recv_wq->grh[0] ) );
930
arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
932
if ( ! arbel_recv_wq->grh ) {
938
/* Link work queue entries */
939
wqe_idx_mask = ( num_wqes - 1 );
940
nds = ( ( offsetof ( typeof ( *wqe ), data ) +
941
sizeof ( wqe->data[0] ) ) >> 4 );
942
for ( i = 0 ; i < num_wqes ; i++ ) {
943
wqe = &arbel_recv_wq->wqe[i].recv;
944
next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
945
MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
946
( virt_to_bus ( next_wqe ) >> 6 ) );
947
MLX_FILL_1 ( &wqe->next, 1, nds, nds );
948
for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
949
( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
950
MLX_FILL_1 ( &wqe->data[j], 1,
951
l_key, ARBEL_INVALID_LKEY );
957
free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
959
free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
967
* @v ibdev Infiniband device
969
* @ret rc Return status code
971
static int arbel_create_qp ( struct ib_device *ibdev,
972
struct ib_queue_pair *qp ) {
973
struct arbel *arbel = ib_get_drvdata ( ibdev );
974
struct arbel_queue_pair *arbel_qp;
975
struct arbelprm_qp_ee_state_transitions qpctx;
976
struct arbelprm_qp_db_record *send_db_rec;
977
struct arbelprm_qp_db_record *recv_db_rec;
978
physaddr_t send_wqe_base_adr;
979
physaddr_t recv_wqe_base_adr;
980
physaddr_t wqe_base_adr;
983
/* Warn about dysfunctional code
985
* Arbel seems to crash the system as soon as the first send
986
* WQE completes on an RC queue pair. (NOPs complete
987
* successfully, so this is a problem specific to the work
988
* queue rather than the completion queue.) The cause of this
989
* problem has remained unknown for over a year. Patches to
990
* fix this are welcome.
992
if ( qp->type == IB_QPT_RC )
993
DBG ( "*** WARNING: Arbel RC support is non-functional ***\n" );
995
/* Calculate queue pair number */
996
if ( ( rc = arbel_alloc_qpn ( ibdev, qp ) ) != 0 )
999
/* Allocate control structures */
1000
arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
1005
arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( arbel, qp );
1006
arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( arbel, qp );
1008
/* Create send and receive work queues */
1009
if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
1010
qp->send.num_wqes ) ) != 0 )
1011
goto err_create_send_wq;
1012
if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, qp->recv.num_wqes,
1014
goto err_create_recv_wq;
1016
/* Send and receive work queue entries must be within the same 4GB */
1017
send_wqe_base_adr = virt_to_bus ( arbel_qp->send.wqe );
1018
recv_wqe_base_adr = virt_to_bus ( arbel_qp->recv.wqe );
1019
if ( ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) &&
1020
( ( ( ( uint64_t ) send_wqe_base_adr ) >> 32 ) !=
1021
( ( ( uint64_t ) recv_wqe_base_adr ) >> 32 ) ) ) {
1022
DBGC ( arbel, "Arbel %p QPN %#lx cannot support send %08lx "
1023
"recv %08lx\n", arbel, qp->qpn,
1024
send_wqe_base_adr, recv_wqe_base_adr );
1026
goto err_unsupported_address_split;
1028
wqe_base_adr = send_wqe_base_adr;
1030
/* Initialise doorbell records */
1031
send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
1032
MLX_FILL_1 ( send_db_rec, 0, counter, 0 );
1033
MLX_FILL_2 ( send_db_rec, 1,
1034
res, ARBEL_UAR_RES_SQ,
1035
qp_number, qp->qpn );
1036
recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
1037
MLX_FILL_1 ( recv_db_rec, 0, counter, 0 );
1038
MLX_FILL_2 ( recv_db_rec, 1,
1039
res, ARBEL_UAR_RES_RQ,
1040
qp_number, qp->qpn );
1042
/* Transition queue to INIT state */
1043
memset ( &qpctx, 0, sizeof ( qpctx ) );
1044
MLX_FILL_3 ( &qpctx, 2,
1046
qpc_eec_data.pm_state, ARBEL_PM_STATE_MIGRATED,
1047
qpc_eec_data.st, arbel_qp_st[qp->type] );
1048
MLX_FILL_4 ( &qpctx, 4,
1049
qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
1050
qpc_eec_data.log_rq_stride,
1051
( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
1052
qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
1053
qpc_eec_data.log_sq_stride,
1054
( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
1055
MLX_FILL_1 ( &qpctx, 5,
1056
qpc_eec_data.usr_page, arbel->limits.reserved_uars );
1057
MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
1059
MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
1060
MLX_FILL_H ( &qpctx, 28, qpc_eec_data.wqe_base_adr_h, wqe_base_adr );
1061
MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->lkey );
1062
MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
1063
MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
1064
MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
1065
( send_wqe_base_adr >> 6 ) );
1066
MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
1067
arbel_qp->send.doorbell_idx );
1068
MLX_FILL_4 ( &qpctx, 38,
1069
qpc_eec_data.rre, 1,
1070
qpc_eec_data.rwe, 1,
1071
qpc_eec_data.rae, 1,
1072
qpc_eec_data.rsc, 1 );
1073
MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
1074
MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
1075
( recv_wqe_base_adr >> 6 ) );
1076
MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
1077
arbel_qp->recv.doorbell_idx );
1078
if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
1079
DBGC ( arbel, "Arbel %p QPN %#lx RST2INIT_QPEE failed: %s\n",
1080
arbel, qp->qpn, strerror ( rc ) );
1081
goto err_rst2init_qpee;
1083
arbel_qp->state = ARBEL_QP_ST_INIT;
1085
DBGC ( arbel, "Arbel %p QPN %#lx send ring [%08lx,%08lx), doorbell "
1086
"%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->send.wqe ),
1087
( virt_to_phys ( arbel_qp->send.wqe ) +
1088
arbel_qp->send.wqe_size ),
1089
virt_to_phys ( send_db_rec ) );
1090
DBGC ( arbel, "Arbel %p QPN %#lx receive ring [%08lx,%08lx), doorbell "
1091
"%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->recv.wqe ),
1092
( virt_to_phys ( arbel_qp->recv.wqe ) +
1093
arbel_qp->recv.wqe_size ),
1094
virt_to_phys ( recv_db_rec ) );
1095
DBGC ( arbel, "Arbel %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
1096
arbel, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
1097
ib_qp_set_drvdata ( qp, arbel_qp );
1100
arbel_cmd_2rst_qpee ( arbel, qp->qpn );
1102
MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1103
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1104
err_unsupported_address_split:
1105
free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
1106
free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
1108
free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
1112
arbel_free_qpn ( ibdev, qp );
1120
* @v ibdev Infiniband device
1122
* @ret rc Return status code
1124
static int arbel_modify_qp ( struct ib_device *ibdev,
1125
struct ib_queue_pair *qp ) {
1126
struct arbel *arbel = ib_get_drvdata ( ibdev );
1127
struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
1128
struct arbelprm_qp_ee_state_transitions qpctx;
1131
/* Transition queue to RTR state, if applicable */
1132
if ( arbel_qp->state < ARBEL_QP_ST_RTR ) {
1133
memset ( &qpctx, 0, sizeof ( qpctx ) );
1134
MLX_FILL_2 ( &qpctx, 4,
1135
qpc_eec_data.mtu, ARBEL_MTU_2048,
1136
qpc_eec_data.msg_max, 31 );
1137
MLX_FILL_1 ( &qpctx, 7,
1138
qpc_eec_data.remote_qpn_een, qp->av.qpn );
1139
MLX_FILL_2 ( &qpctx, 11,
1140
qpc_eec_data.primary_address_path.rnr_retry,
1142
qpc_eec_data.primary_address_path.rlid,
1144
MLX_FILL_2 ( &qpctx, 12,
1145
qpc_eec_data.primary_address_path.ack_timeout,
1146
14 /* 4.096us * 2^(14) = 67ms */,
1147
qpc_eec_data.primary_address_path.max_stat_rate,
1148
arbel_rate ( &qp->av ) );
1149
memcpy ( &qpctx.u.dwords[14], &qp->av.gid,
1150
sizeof ( qp->av.gid ) );
1151
MLX_FILL_1 ( &qpctx, 30,
1152
qpc_eec_data.retry_count, ARBEL_RETRY_MAX );
1153
MLX_FILL_1 ( &qpctx, 39,
1154
qpc_eec_data.next_rcv_psn, qp->recv.psn );
1155
MLX_FILL_1 ( &qpctx, 40,
1156
qpc_eec_data.ra_buff_indx,
1157
( arbel->limits.reserved_rdbs +
1158
( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) -
1159
arbel->special_qpn_base ) ) );
1160
if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn,
1162
DBGC ( arbel, "Arbel %p QPN %#lx INIT2RTR_QPEE failed:"
1163
" %s\n", arbel, qp->qpn, strerror ( rc ) );
1166
arbel_qp->state = ARBEL_QP_ST_RTR;
1169
/* Transition queue to RTS state, if applicable */
1170
if ( arbel_qp->state < ARBEL_QP_ST_RTS ) {
1171
memset ( &qpctx, 0, sizeof ( qpctx ) );
1172
MLX_FILL_1 ( &qpctx, 11,
1173
qpc_eec_data.primary_address_path.rnr_retry,
1175
MLX_FILL_1 ( &qpctx, 12,
1176
qpc_eec_data.primary_address_path.ack_timeout,
1177
14 /* 4.096us * 2^(14) = 67ms */ );
1178
MLX_FILL_2 ( &qpctx, 30,
1179
qpc_eec_data.retry_count, ARBEL_RETRY_MAX,
1180
qpc_eec_data.sic, 1 );
1181
MLX_FILL_1 ( &qpctx, 32,
1182
qpc_eec_data.next_send_psn, qp->send.psn );
1183
if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn,
1185
DBGC ( arbel, "Arbel %p QPN %#lx RTR2RTS_QPEE failed: "
1186
"%s\n", arbel, qp->qpn, strerror ( rc ) );
1189
arbel_qp->state = ARBEL_QP_ST_RTS;
1192
/* Update parameters in RTS state */
1193
memset ( &qpctx, 0, sizeof ( qpctx ) );
1194
MLX_FILL_1 ( &qpctx, 0, opt_param_mask, ARBEL_QPEE_OPT_PARAM_QKEY );
1195
MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
1196
if ( ( rc = arbel_cmd_rts2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
1197
DBGC ( arbel, "Arbel %p QPN %#lx RTS2RTS_QPEE failed: %s\n",
1198
arbel, qp->qpn, strerror ( rc ) );
1206
* Destroy queue pair
1208
* @v ibdev Infiniband device
1211
static void arbel_destroy_qp ( struct ib_device *ibdev,
1212
struct ib_queue_pair *qp ) {
1213
struct arbel *arbel = ib_get_drvdata ( ibdev );
1214
struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
1215
struct arbelprm_qp_db_record *send_db_rec;
1216
struct arbelprm_qp_db_record *recv_db_rec;
1219
/* Take ownership back from hardware */
1220
if ( ( rc = arbel_cmd_2rst_qpee ( arbel, qp->qpn ) ) != 0 ) {
1221
DBGC ( arbel, "Arbel %p QPN %#lx FATAL 2RST_QPEE failed: "
1222
"%s\n", arbel, qp->qpn, strerror ( rc ) );
1223
/* Leak memory and return; at least we avoid corruption */
1227
/* Clear doorbell records */
1228
send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
1229
recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
1230
MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1231
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1234
free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
1235
free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
1236
free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
1239
/* Mark queue number as free */
1240
arbel_free_qpn ( ibdev, qp );
1242
ib_qp_set_drvdata ( qp, NULL );
1245
/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */
* Ring doorbell register in UAR
1255
* @v arbel Arbel device
1256
* @v db_reg Doorbell register structure
1257
* @v offset Address of doorbell
1259
static void arbel_ring_doorbell ( struct arbel *arbel,
1260
union arbelprm_doorbell_register *db_reg,
1261
unsigned int offset ) {
1263
DBGC2 ( arbel, "Arbel %p ringing doorbell %08x:%08x at %lx\n",
1264
arbel, ntohl ( db_reg->dword[0] ), ntohl ( db_reg->dword[1] ),
1265
virt_to_phys ( arbel->uar + offset ) );
1268
writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
1270
writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
1273
/** GID used for GID-less send work queue entries */
1274
static const union ib_gid arbel_no_gid = {
1275
.bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
1279
* Construct UD send work queue entry
1281
* @v ibdev Infiniband device
1283
* @v dest Destination address vector
1284
* @v iobuf I/O buffer
1285
* @v wqe Send work queue entry
1286
* @ret nds Work queue entry size
1288
static size_t arbel_fill_ud_send_wqe ( struct ib_device *ibdev,
1289
struct ib_queue_pair *qp __unused,
1290
struct ib_address_vector *dest,
1291
struct io_buffer *iobuf,
1292
union arbel_send_wqe *wqe ) {
1293
struct arbel *arbel = ib_get_drvdata ( ibdev );
1294
const union ib_gid *gid;
1296
/* Construct this work queue entry */
1297
MLX_FILL_1 ( &wqe->ud.ctrl, 0, always1, 1 );
1298
MLX_FILL_2 ( &wqe->ud.ud, 0,
1299
ud_address_vector.pd, ARBEL_GLOBAL_PD,
1300
ud_address_vector.port_number, ibdev->port );
1301
MLX_FILL_2 ( &wqe->ud.ud, 1,
1302
ud_address_vector.rlid, dest->lid,
1303
ud_address_vector.g, dest->gid_present );
1304
MLX_FILL_2 ( &wqe->ud.ud, 2,
1305
ud_address_vector.max_stat_rate, arbel_rate ( dest ),
1306
ud_address_vector.msg, 3 );
1307
MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
1308
gid = ( dest->gid_present ? &dest->gid : &arbel_no_gid );
1309
memcpy ( &wqe->ud.ud.u.dwords[4], gid, sizeof ( *gid ) );
1310
MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
1311
MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
1312
MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
1313
MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, arbel->lkey );
1314
MLX_FILL_H ( &wqe->ud.data[0], 2,
1315
local_address_h, virt_to_bus ( iobuf->data ) );
1316
MLX_FILL_1 ( &wqe->ud.data[0], 3,
1317
local_address_l, virt_to_bus ( iobuf->data ) );
1319
return ( offsetof ( typeof ( wqe->ud ), data[1] ) >> 4 );
1323
* Construct MLX send work queue entry
1325
* @v ibdev Infiniband device
1327
* @v dest Destination address vector
1328
* @v iobuf I/O buffer
1329
* @v wqe Send work queue entry
1330
* @ret nds Work queue entry size
1332
static size_t arbel_fill_mlx_send_wqe ( struct ib_device *ibdev,
1333
struct ib_queue_pair *qp,
1334
struct ib_address_vector *dest,
1335
struct io_buffer *iobuf,
1336
union arbel_send_wqe *wqe ) {
1337
struct arbel *arbel = ib_get_drvdata ( ibdev );
1338
struct io_buffer headers;
1340
/* Construct IB headers */
1341
iob_populate ( &headers, &wqe->mlx.headers, 0,
1342
sizeof ( wqe->mlx.headers ) );
1343
iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
1344
ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1346
/* Construct this work queue entry */
1347
MLX_FILL_5 ( &wqe->mlx.ctrl, 0,
1348
c, 1 /* generate completion */,
1349
icrc, 0 /* generate ICRC */,
1350
max_statrate, arbel_rate ( dest ),
1352
v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
1353
MLX_FILL_1 ( &wqe->mlx.ctrl, 1, rlid, dest->lid );
1354
MLX_FILL_1 ( &wqe->mlx.data[0], 0,
1355
byte_count, iob_len ( &headers ) );
1356
MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, arbel->lkey );
1357
MLX_FILL_H ( &wqe->mlx.data[0], 2,
1358
local_address_h, virt_to_bus ( headers.data ) );
1359
MLX_FILL_1 ( &wqe->mlx.data[0], 3,
1360
local_address_l, virt_to_bus ( headers.data ) );
1361
MLX_FILL_1 ( &wqe->mlx.data[1], 0,
1362
byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
1363
MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, arbel->lkey );
1364
MLX_FILL_H ( &wqe->mlx.data[1], 2,
1365
local_address_h, virt_to_bus ( iobuf->data ) );
1366
MLX_FILL_1 ( &wqe->mlx.data[1], 3,
1367
local_address_l, virt_to_bus ( iobuf->data ) );
1369
return ( offsetof ( typeof ( wqe->mlx ), data[2] ) >> 4 );
1373
* Construct RC send work queue entry
1375
* @v ibdev Infiniband device
1377
* @v dest Destination address vector
1378
* @v iobuf I/O buffer
1379
* @v wqe Send work queue entry
1380
* @ret nds Work queue entry size
1382
static size_t arbel_fill_rc_send_wqe ( struct ib_device *ibdev,
1383
struct ib_queue_pair *qp __unused,
1384
struct ib_address_vector *dest __unused,
1385
struct io_buffer *iobuf,
1386
union arbel_send_wqe *wqe ) {
1387
struct arbel *arbel = ib_get_drvdata ( ibdev );
1389
/* Construct this work queue entry */
1390
MLX_FILL_1 ( &wqe->rc.ctrl, 0, always1, 1 );
1391
MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
1392
MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, arbel->lkey );
1393
MLX_FILL_H ( &wqe->rc.data[0], 2,
1394
local_address_h, virt_to_bus ( iobuf->data ) );
1395
MLX_FILL_1 ( &wqe->rc.data[0], 3,
1396
local_address_l, virt_to_bus ( iobuf->data ) );
1398
return ( offsetof ( typeof ( wqe->rc ), data[1] ) >> 4 );
1401
/** Work queue entry constructors */
1403
( * arbel_fill_send_wqe[] ) ( struct ib_device *ibdev,
1404
struct ib_queue_pair *qp,
1405
struct ib_address_vector *dest,
1406
struct io_buffer *iobuf,
1407
union arbel_send_wqe *wqe ) = {
1408
[IB_QPT_SMI] = arbel_fill_mlx_send_wqe,
1409
[IB_QPT_GSI] = arbel_fill_mlx_send_wqe,
1410
[IB_QPT_UD] = arbel_fill_ud_send_wqe,
1411
[IB_QPT_RC] = arbel_fill_rc_send_wqe,
1415
* Post send work queue entry
1417
* @v ibdev Infiniband device
1419
* @v dest Destination address vector
1420
* @v iobuf I/O buffer
1421
* @ret rc Return status code
1423
static int arbel_post_send ( struct ib_device *ibdev,
1424
struct ib_queue_pair *qp,
1425
struct ib_address_vector *dest,
1426
struct io_buffer *iobuf ) {
1427
struct arbel *arbel = ib_get_drvdata ( ibdev );
1428
struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
1429
struct ib_work_queue *wq = &qp->send;
1430
struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
1431
union arbel_send_wqe *prev_wqe;
1432
union arbel_send_wqe *wqe;
1433
struct arbelprm_qp_db_record *qp_db_rec;
1434
union arbelprm_doorbell_register db_reg;
1435
unsigned long wqe_idx_mask;
1438
/* Allocate work queue entry */
1439
wqe_idx_mask = ( wq->num_wqes - 1 );
1440
if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
1441
DBGC ( arbel, "Arbel %p QPN %#lx send queue full",
1445
wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1446
prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask];
1447
wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask];
1449
/* Construct work queue entry */
1450
memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
1451
( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
1452
assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
1453
sizeof ( arbel_fill_send_wqe[0] ) ) );
1454
assert ( arbel_fill_send_wqe[qp->type] != NULL );
1455
nds = arbel_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
1456
DBGCP ( arbel, "Arbel %p QPN %#lx posting send WQE %#lx:\n",
1457
arbel, qp->qpn, ( wq->next_idx & wqe_idx_mask ) );
1458
DBGCP_HDA ( arbel, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );
1460
/* Update previous work queue entry's "next" field */
1461
MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
1462
MLX_FILL_3 ( &prev_wqe->next, 1,
1467
/* Update doorbell record */
1469
qp_db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx].qp;
1470
MLX_FILL_1 ( qp_db_rec, 0,
1471
counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
1473
/* Ring doorbell register */
1474
MLX_FILL_4 ( &db_reg.send, 0,
1475
nopcode, ARBEL_OPCODE_SEND,
1477
wqe_counter, ( wq->next_idx & 0xffff ),
1479
MLX_FILL_2 ( &db_reg.send, 1,
1482
arbel_ring_doorbell ( arbel, &db_reg, ARBEL_DB_POST_SND_OFFSET );
1484
/* Update work queue's index */
1491
* Post receive work queue entry
1493
* @v ibdev Infiniband device
1495
* @v iobuf I/O buffer
1496
* @ret rc Return status code
1498
static int arbel_post_recv ( struct ib_device *ibdev,
1499
struct ib_queue_pair *qp,
1500
struct io_buffer *iobuf ) {
1501
struct arbel *arbel = ib_get_drvdata ( ibdev );
1502
struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
1503
struct ib_work_queue *wq = &qp->recv;
1504
struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
1505
struct arbelprm_recv_wqe *wqe;
1506
struct arbelprm_wqe_segment_data_ptr *data;
1507
struct ib_global_route_header *grh;
1508
union arbelprm_doorbell_record *db_rec;
1509
unsigned int wqe_idx_mask;
1511
/* Allocate work queue entry */
1512
wqe_idx_mask = ( wq->num_wqes - 1 );
1513
if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
1514
DBGC ( arbel, "Arbel %p QPN %#lx receive queue full\n",
1518
wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1519
wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
1521
/* Construct work queue entry */
1522
data = &wqe->data[0];
1523
if ( arbel_recv_wq->grh ) {
1524
grh = &arbel_recv_wq->grh[wq->next_idx & wqe_idx_mask];
1525
MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
1526
MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
1527
MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
1528
MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
1531
MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
1532
MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
1533
MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
1534
MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );
1536
/* Update doorbell record */
1538
db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
1539
MLX_FILL_1 ( &db_rec->qp, 0,
1540
counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
1542
/* Update work queue's index */
1551
* @v ibdev Infiniband device
1552
* @v cq Completion queue
1553
* @v cqe Hardware completion queue entry
1554
* @ret rc Return status code
1556
static int arbel_complete ( struct ib_device *ibdev,
1557
struct ib_completion_queue *cq,
1558
union arbelprm_completion_entry *cqe ) {
1559
struct arbel *arbel = ib_get_drvdata ( ibdev );
1560
struct ib_work_queue *wq;
1561
struct ib_queue_pair *qp;
1562
struct arbel_queue_pair *arbel_qp;
1563
struct arbel_send_work_queue *arbel_send_wq;
1564
struct arbel_recv_work_queue *arbel_recv_wq;
1565
struct arbelprm_recv_wqe *recv_wqe;
1566
struct io_buffer *iobuf;
1567
struct ib_address_vector recv_dest;
1568
struct ib_address_vector recv_source;
1569
struct ib_global_route_header *grh;
1570
struct ib_address_vector *source;
1571
unsigned int opcode;
1574
unsigned long wqe_adr;
1575
unsigned long wqe_idx;
1579
/* Parse completion */
1580
qpn = MLX_GET ( &cqe->normal, my_qpn );
1581
is_send = MLX_GET ( &cqe->normal, s );
1582
wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
1583
opcode = MLX_GET ( &cqe->normal, opcode );
1584
if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
1585
/* "s" field is not valid for error opcodes */
1586
is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
1587
DBGC ( arbel, "Arbel %p CQN %#lx %s QPN %#lx syndrome %#x "
1588
"vendor %#x\n", arbel, cq->cqn,
1589
( is_send ? "send" : "recv" ), qpn,
1590
MLX_GET ( &cqe->error, syndrome ),
1591
MLX_GET ( &cqe->error, vendor_code ) );
1592
DBGC_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1594
/* Don't return immediately; propagate error to completer */
1597
/* Identify work queue */
1598
wq = ib_find_wq ( cq, qpn, is_send );
1600
DBGC ( arbel, "Arbel %p CQN %#lx unknown %s QPN %#lx\n",
1601
arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
1605
arbel_qp = ib_qp_get_drvdata ( qp );
1606
arbel_send_wq = &arbel_qp->send;
1607
arbel_recv_wq = &arbel_qp->recv;
1609
/* Identify work queue entry index */
1611
wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
1612
sizeof ( arbel_send_wq->wqe[0] ) );
1613
assert ( wqe_idx < qp->send.num_wqes );
1615
wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
1616
sizeof ( arbel_recv_wq->wqe[0] ) );
1617
assert ( wqe_idx < qp->recv.num_wqes );
1620
DBGCP ( arbel, "Arbel %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1621
arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1623
DBGCP_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1625
/* Identify I/O buffer */
1626
iobuf = wq->iobufs[wqe_idx];
1628
DBGC ( arbel, "Arbel %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
1629
arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1633
wq->iobufs[wqe_idx] = NULL;
1636
/* Hand off to completion handler */
1637
ib_complete_send ( ibdev, qp, iobuf, rc );
1639
/* Set received length */
1640
len = MLX_GET ( &cqe->normal, byte_cnt );
1641
recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
1642
assert ( MLX_GET ( &recv_wqe->data[0], local_address_l ) ==
1643
virt_to_bus ( iobuf->data ) );
1644
assert ( MLX_GET ( &recv_wqe->data[0], byte_count ) ==
1645
iob_tailroom ( iobuf ) );
1646
MLX_FILL_1 ( &recv_wqe->data[0], 0, byte_count, 0 );
1647
MLX_FILL_1 ( &recv_wqe->data[0], 1,
1648
l_key, ARBEL_INVALID_LKEY );
1649
memset ( &recv_dest, 0, sizeof ( recv_dest ) );
1650
recv_dest.qpn = qpn;
1651
switch ( qp->type ) {
1655
/* Locate corresponding GRH */
1656
assert ( arbel_recv_wq->grh != NULL );
1657
grh = &arbel_recv_wq->grh[wqe_idx];
1658
len -= sizeof ( *grh );
1659
/* Construct address vector */
1660
source = &recv_source;
1661
memset ( source, 0, sizeof ( *source ) );
1662
source->qpn = MLX_GET ( &cqe->normal, rqpn );
1663
source->lid = MLX_GET ( &cqe->normal, rlid );
1664
source->sl = MLX_GET ( &cqe->normal, sl );
1665
recv_dest.gid_present = source->gid_present =
1666
MLX_GET ( &cqe->normal, g );
1667
memcpy ( &recv_dest.gid, &grh->dgid,
1668
sizeof ( recv_dest.gid ) );
1669
memcpy ( &source->gid, &grh->sgid,
1670
sizeof ( source->gid ) );
1679
assert ( len <= iob_tailroom ( iobuf ) );
1680
iob_put ( iobuf, len );
1681
/* Hand off to completion handler */
1682
ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc );
1689
* Poll completion queue
1691
* @v ibdev Infiniband device
1692
* @v cq Completion queue
1694
static void arbel_poll_cq ( struct ib_device *ibdev,
1695
struct ib_completion_queue *cq ) {
1696
struct arbel *arbel = ib_get_drvdata ( ibdev );
1697
struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
1698
struct arbelprm_cq_ci_db_record *ci_db_rec;
1699
union arbelprm_completion_entry *cqe;
1700
unsigned int cqe_idx_mask;
1704
/* Look for completion entry */
1705
cqe_idx_mask = ( cq->num_cqes - 1 );
1706
cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
1707
if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
1708
/* Entry still owned by hardware; end of poll */
1712
/* Handle completion */
1713
if ( ( rc = arbel_complete ( ibdev, cq, cqe ) ) != 0 ) {
1714
DBGC ( arbel, "Arbel %p CQN %#lx failed to complete: "
1715
"%s\n", arbel, cq->cqn, strerror ( rc ) );
1716
DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
1719
/* Return ownership to hardware */
1720
MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );
1722
/* Update completion queue's index */
1724
/* Update doorbell record */
1725
ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
1726
MLX_FILL_1 ( ci_db_rec, 0,
1727
counter, ( cq->next_idx & 0xffffffffUL ) );
1731
/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */
* Create event queue
1741
* @v arbel Arbel device
1742
* @ret rc Return status code
1744
static int arbel_create_eq ( struct arbel *arbel ) {
1745
struct arbel_event_queue *arbel_eq = &arbel->eq;
1746
struct arbelprm_eqc eqctx;
1747
struct arbelprm_event_mask mask;
1751
/* Select event queue number */
1752
arbel_eq->eqn = arbel->limits.reserved_eqs;
1754
/* Calculate doorbell address */
1755
arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
1756
ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );
1758
/* Allocate event queue itself */
1759
arbel_eq->eqe_size =
1760
( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
1761
arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
1762
sizeof ( arbel_eq->eqe[0] ) );
1763
if ( ! arbel_eq->eqe ) {
1767
memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
1768
for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
1769
MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
1773
/* Hand queue over to hardware */
1774
memset ( &eqctx, 0, sizeof ( eqctx ) );
1775
MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
1776
MLX_FILL_H ( &eqctx, 1,
1777
start_address_h, virt_to_phys ( arbel_eq->eqe ) );
1778
MLX_FILL_1 ( &eqctx, 2,
1779
start_address_l, virt_to_phys ( arbel_eq->eqe ) );
1780
MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
1781
MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
1782
MLX_FILL_1 ( &eqctx, 7, lkey, arbel->lkey );
1783
if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
1785
DBGC ( arbel, "Arbel %p EQN %#lx SW2HW_EQ failed: %s\n",
1786
arbel, arbel_eq->eqn, strerror ( rc ) );
1790
/* Map events to this event queue */
1791
memset ( &mask, 0xff, sizeof ( mask ) );
1792
if ( ( rc = arbel_cmd_map_eq ( arbel,
1793
( ARBEL_MAP_EQ | arbel_eq->eqn ),
1795
DBGC ( arbel, "Arbel %p EQN %#lx MAP_EQ failed: %s\n",
1796
arbel, arbel_eq->eqn, strerror ( rc ) );
1800
DBGC ( arbel, "Arbel %p EQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
1801
arbel, arbel_eq->eqn, virt_to_phys ( arbel_eq->eqe ),
1802
( virt_to_phys ( arbel_eq->eqe ) + arbel_eq->eqe_size ),
1803
virt_to_phys ( arbel_eq->doorbell ) );
1807
arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
1809
free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
1811
memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
1816
* Destroy event queue
1818
* @v arbel Arbel device
1820
static void arbel_destroy_eq ( struct arbel *arbel ) {
1821
struct arbel_event_queue *arbel_eq = &arbel->eq;
1822
struct arbelprm_eqc eqctx;
1823
struct arbelprm_event_mask mask;
1826
/* Unmap events from event queue */
1827
memset ( &mask, 0, sizeof ( mask ) );
1828
MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
1829
if ( ( rc = arbel_cmd_map_eq ( arbel,
1830
( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
1832
DBGC ( arbel, "Arbel %p EQN %#lx FATAL MAP_EQ failed to "
1833
"unmap: %s\n", arbel, arbel_eq->eqn, strerror ( rc ) );
1834
/* Continue; HCA may die but system should survive */
1837
/* Take ownership back from hardware */
1838
if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
1840
DBGC ( arbel, "Arbel %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
1841
arbel, arbel_eq->eqn, strerror ( rc ) );
1842
/* Leak memory and return; at least we avoid corruption */
1847
free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
1848
memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
1852
* Handle port state event
1854
* @v arbel Arbel device
1855
* @v eqe Port state change event queue entry
1857
static void arbel_event_port_state_change ( struct arbel *arbel,
1858
union arbelprm_event_entry *eqe){
1862
/* Get port and link status */
1863
port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
1864
link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
1865
DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
1866
( link_up ? "up" : "down" ) );
1869
if ( port >= ARBEL_NUM_PORTS ) {
1870
DBGC ( arbel, "Arbel %p port %d does not exist!\n",
1871
arbel, ( port + 1 ) );
1875
/* Update MAD parameters */
1876
ib_smc_update ( arbel->ibdev[port], arbel_mad );
1882
* @v ibdev Infiniband device
1884
static void arbel_poll_eq ( struct ib_device *ibdev ) {
1885
struct arbel *arbel = ib_get_drvdata ( ibdev );
1886
struct arbel_event_queue *arbel_eq = &arbel->eq;
1887
union arbelprm_event_entry *eqe;
1888
union arbelprm_eq_doorbell_register db_reg;
1889
unsigned int eqe_idx_mask;
1890
unsigned int event_type;
1892
/* No event is generated upon reaching INIT, so we must poll
1893
* separately for link state changes while we remain DOWN.
1895
if ( ib_is_open ( ibdev ) &&
1896
( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
1897
ib_smc_update ( ibdev, arbel_mad );
1900
/* Poll event queue */
1902
/* Look for event entry */
1903
eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
1904
eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
1905
if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
1906
/* Entry still owned by hardware; end of poll */
1909
DBGCP ( arbel, "Arbel %p EQN %#lx event:\n",
1910
arbel, arbel_eq->eqn );
1911
DBGCP_HDA ( arbel, virt_to_phys ( eqe ),
1912
eqe, sizeof ( *eqe ) );
1915
event_type = MLX_GET ( &eqe->generic, event_type );
1916
switch ( event_type ) {
1917
case ARBEL_EV_PORT_STATE_CHANGE:
1918
arbel_event_port_state_change ( arbel, eqe );
1921
DBGC ( arbel, "Arbel %p EQN %#lx unrecognised event "
1923
arbel, arbel_eq->eqn, event_type );
1924
DBGC_HDA ( arbel, virt_to_phys ( eqe ),
1925
eqe, sizeof ( *eqe ) );
1929
/* Return ownership to hardware */
1930
MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
1933
/* Update event queue's index */
1934
arbel_eq->next_idx++;
1937
MLX_FILL_1 ( &db_reg.ci, 0, ci, arbel_eq->next_idx );
1938
writel ( db_reg.dword[0], arbel_eq->doorbell );
1942
/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */
* Map virtual to physical address for firmware usage
1952
* @v arbel Arbel device
1953
* @v map Mapping function
1954
* @v va Virtual address
1955
* @v pa Physical address
1956
* @v len Length of region
1957
* @ret rc Return status code
1959
static int arbel_map_vpm ( struct arbel *arbel,
1960
int ( *map ) ( struct arbel *arbel,
1961
const struct arbelprm_virtual_physical_mapping* ),
1962
uint64_t va, physaddr_t pa, size_t len ) {
1963
struct arbelprm_virtual_physical_mapping mapping;
1972
assert ( ( va & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1973
assert ( ( pa & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1974
assert ( ( len & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1976
/* Calculate starting points */
1978
end = ( start + len );
1979
size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
1980
low = high = ( end & ~( size - 1 ) );
1981
assert ( start < low );
1982
assert ( high <= end );
1984
/* These mappings tend to generate huge volumes of
1985
* uninteresting debug data, which basically makes it
1986
* impossible to use debugging otherwise.
1988
DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
1990
/* Map blocks in descending order of size */
1991
while ( size >= ARBEL_PAGE_SIZE ) {
1993
/* Find the next candidate block */
1994
if ( ( low - size ) >= start ) {
1997
} else if ( ( high + size ) <= end ) {
2004
assert ( ( va & ( size - 1 ) ) == 0 );
2005
assert ( ( pa & ( size - 1 ) ) == 0 );
2007
/* Map this block */
2008
memset ( &mapping, 0, sizeof ( mapping ) );
2009
MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2010
MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2011
MLX_FILL_H ( &mapping, 2, pa_h, pa );
2012
MLX_FILL_2 ( &mapping, 3,
2013
log2size, ( ( fls ( size ) - 1 ) - 12 ),
2014
pa_l, ( pa >> 12 ) );
2015
if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
2016
DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
2017
DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
2019
arbel, va, size, pa, strerror ( rc ) );
2024
assert ( low == start );
2025
assert ( high == end );
2027
DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
2032
* Start firmware running
2034
* @v arbel Arbel device
2035
* @ret rc Return status code
2037
static int arbel_start_firmware ( struct arbel *arbel ) {
2038
struct arbelprm_query_fw fw;
2039
struct arbelprm_access_lam lam;
2040
unsigned int fw_pages;
2043
uint64_t eq_set_ci_base_addr;
2046
/* Get firmware parameters */
2047
if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
2048
DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
2049
arbel, strerror ( rc ) );
2052
DBGC ( arbel, "Arbel %p firmware version %d.%d.%d\n", arbel,
2053
MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2054
MLX_GET ( &fw, fw_rev_subminor ) );
2055
fw_pages = MLX_GET ( &fw, fw_pages );
2056
DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
2057
arbel, ( fw_pages * 4 ) );
2058
eq_set_ci_base_addr =
2059
( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
2060
( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
2061
arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );
2063
/* Enable locally-attached memory. Ignore failure; there may
2064
* be no attached memory.
2066
arbel_cmd_enable_lam ( arbel, &lam );
2068
/* Allocate firmware pages and map firmware area */
2069
fw_len = ( fw_pages * ARBEL_PAGE_SIZE );
2070
if ( ! arbel->firmware_area ) {
2071
arbel->firmware_len = fw_len;
2072
arbel->firmware_area = umalloc ( arbel->firmware_len );
2073
if ( ! arbel->firmware_area ) {
2078
assert ( arbel->firmware_len == fw_len );
2080
fw_base = user_to_phys ( arbel->firmware_area, 0 );
2081
DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
2082
arbel, fw_base, ( fw_base + fw_len ) );
2083
if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
2084
0, fw_base, fw_len ) ) != 0 ) {
2085
DBGC ( arbel, "Arbel %p could not map firmware: %s\n",
2086
arbel, strerror ( rc ) );
2090
/* Start firmware */
2091
if ( ( rc = arbel_cmd_run_fw ( arbel ) ) != 0 ) {
2092
DBGC ( arbel, "Arbel %p could not run firmware: %s\n",
2093
arbel, strerror ( rc ) );
2097
DBGC ( arbel, "Arbel %p firmware started\n", arbel );
2101
arbel_cmd_unmap_fa ( arbel );
2109
* Stop firmware running
2111
* @v arbel Arbel device
2113
static void arbel_stop_firmware ( struct arbel *arbel ) {
2116
if ( ( rc = arbel_cmd_unmap_fa ( arbel ) ) != 0 ) {
2117
DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
2118
arbel, strerror ( rc ) );
2119
/* Leak memory and return; at least we avoid corruption */
2120
arbel->firmware_area = UNULL;
2125
/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */
* @v arbel Arbel device
2136
* @ret rc Return status code
2138
static int arbel_get_limits ( struct arbel *arbel ) {
2139
struct arbelprm_query_dev_lim dev_lim;
2142
if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
2143
DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
2144
arbel, strerror ( rc ) );
2148
arbel->limits.reserved_qps =
2149
( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
2150
arbel->limits.qpc_entry_size = MLX_GET ( &dev_lim, qpc_entry_sz );
2151
arbel->limits.eqpc_entry_size = MLX_GET ( &dev_lim, eqpc_entry_sz );
2152
arbel->limits.reserved_srqs =
2153
( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
2154
arbel->limits.srqc_entry_size = MLX_GET ( &dev_lim, srq_entry_sz );
2155
arbel->limits.reserved_ees =
2156
( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
2157
arbel->limits.eec_entry_size = MLX_GET ( &dev_lim, eec_entry_sz );
2158
arbel->limits.eeec_entry_size = MLX_GET ( &dev_lim, eeec_entry_sz );
2159
arbel->limits.reserved_cqs =
2160
( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
2161
arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
2162
arbel->limits.reserved_mtts =
2163
( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
2164
arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
2165
arbel->limits.reserved_mrws =
2166
( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
2167
arbel->limits.mpt_entry_size = MLX_GET ( &dev_lim, mpt_entry_sz );
2168
arbel->limits.reserved_rdbs =
2169
( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
2170
arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
2171
arbel->limits.eqc_entry_size = MLX_GET ( &dev_lim, eqc_entry_sz );
2172
arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );
2173
arbel->limits.uar_scratch_entry_size =
2174
MLX_GET ( &dev_lim, uar_scratch_entry_sz );
2176
DBGC ( arbel, "Arbel %p reserves %d x %#zx QPC, %d x %#zx EQPC, "
2177
"%d x %#zx SRQC\n", arbel,
2178
arbel->limits.reserved_qps, arbel->limits.qpc_entry_size,
2179
arbel->limits.reserved_qps, arbel->limits.eqpc_entry_size,
2180
arbel->limits.reserved_srqs, arbel->limits.srqc_entry_size );
2181
DBGC ( arbel, "Arbel %p reserves %d x %#zx EEC, %d x %#zx EEEC, "
2182
"%d x %#zx CQC\n", arbel,
2183
arbel->limits.reserved_ees, arbel->limits.eec_entry_size,
2184
arbel->limits.reserved_ees, arbel->limits.eeec_entry_size,
2185
arbel->limits.reserved_cqs, arbel->limits.cqc_entry_size );
2186
DBGC ( arbel, "Arbel %p reserves %d x %#zx EQC, %d x %#zx MTT, "
2187
"%d x %#zx MPT\n", arbel,
2188
arbel->limits.reserved_eqs, arbel->limits.eqc_entry_size,
2189
arbel->limits.reserved_mtts, arbel->limits.mtt_entry_size,
2190
arbel->limits.reserved_mrws, arbel->limits.mpt_entry_size );
2191
DBGC ( arbel, "Arbel %p reserves %d x %#zx RDB, %d x %#zx UAR, "
2192
"%d x %#zx UAR scratchpad\n", arbel,
2193
arbel->limits.reserved_rdbs, ARBEL_RDB_ENTRY_SIZE,
2194
arbel->limits.reserved_uars, ARBEL_PAGE_SIZE,
2195
arbel->limits.reserved_uars,
2196
arbel->limits.uar_scratch_entry_size );
/**
 * Align ICM table
 *
 * @v icm_offset	Current ICM offset
 * @v len		ICM table length
 * @ret icm_offset	ICM offset
 */
static size_t icm_align ( size_t icm_offset, size_t len ) {

	/* Round up to a multiple of the table size; table sizes are
	 * always powers of two, so this is a simple mask operation.
	 */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
	return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
}
* @v arbel Arbel device
2219
* @v init_hca INIT_HCA structure to fill in
2220
* @ret rc Return status code
2222
static int arbel_alloc_icm ( struct arbel *arbel,
2223
struct arbelprm_init_hca *init_hca ) {
2224
struct arbelprm_scalar_parameter icm_size;
2225
struct arbelprm_scalar_parameter icm_aux_size;
2226
struct arbelprm_scalar_parameter unmap_icm;
2227
union arbelprm_doorbell_record *db_rec;
2228
size_t icm_offset = 0;
2229
unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
2230
unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
2231
unsigned int log_num_eqs, log_num_mcs;
2232
size_t icm_len, icm_aux_len;
2234
physaddr_t icm_phys;
2237
/* Calculate number of each object type within ICM */
2238
log_num_qps = fls ( arbel->limits.reserved_qps +
2239
ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
2240
log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
2241
log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
2242
log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
2243
log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
2244
log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
2245
log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
2246
log_num_rdbs = fls ( arbel->limits.reserved_rdbs +
2247
ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
2248
log_num_uars = fls ( arbel->limits.reserved_uars +
2249
1 /* single UAR used */ - 1 );
2250
log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;
2252
/* Queue pair contexts */
2253
len = ( ( 1 << log_num_qps ) * arbel->limits.qpc_entry_size );
2254
icm_offset = icm_align ( icm_offset, len );
2255
MLX_FILL_2 ( init_hca, 13,
2256
qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2257
( icm_offset >> 7 ),
2258
qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2260
DBGC ( arbel, "Arbel %p ICM QPC is %d x %#zx at [%zx,%zx)\n",
2261
arbel, ( 1 << log_num_qps ), arbel->limits.qpc_entry_size,
2262
icm_offset, ( icm_offset + len ) );
2265
/* Extended queue pair contexts */
2266
len = ( ( 1 << log_num_qps ) * arbel->limits.eqpc_entry_size );
2267
icm_offset = icm_align ( icm_offset, len );
2268
MLX_FILL_1 ( init_hca, 25,
2269
qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l,
2271
DBGC ( arbel, "Arbel %p ICM EQPC is %d x %#zx at [%zx,%zx)\n",
2272
arbel, ( 1 << log_num_qps ), arbel->limits.eqpc_entry_size,
2273
icm_offset, ( icm_offset + len ) );
2276
/* Completion queue contexts */
2277
len = ( ( 1 << log_num_cqs ) * arbel->limits.cqc_entry_size );
2278
icm_offset = icm_align ( icm_offset, len );
2279
MLX_FILL_2 ( init_hca, 21,
2280
qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2281
( icm_offset >> 6 ),
2282
qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2284
DBGC ( arbel, "Arbel %p ICM CQC is %d x %#zx at [%zx,%zx)\n",
2285
arbel, ( 1 << log_num_cqs ), arbel->limits.cqc_entry_size,
2286
icm_offset, ( icm_offset + len ) );
2289
/* Event queue contexts */
2290
len = ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
2291
icm_offset = icm_align ( icm_offset, len );
2292
MLX_FILL_2 ( init_hca, 33,
2293
qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2294
( icm_offset >> 6 ),
2295
qpc_eec_cqc_eqc_rdb_parameters.log_num_eq,
2297
DBGC ( arbel, "Arbel %p ICM EQC is %d x %#zx at [%zx,%zx)\n",
2298
arbel, ( 1 << log_num_eqs ), arbel->limits.eqc_entry_size,
2299
icm_offset, ( icm_offset + len ) );
2302
/* End-to-end contexts */
2303
len = ( ( 1 << log_num_ees ) * arbel->limits.eec_entry_size );
2304
icm_offset = icm_align ( icm_offset, len );
2305
MLX_FILL_2 ( init_hca, 17,
2306
qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
2307
( icm_offset >> 7 ),
2308
qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee,
2310
DBGC ( arbel, "Arbel %p ICM EEC is %d x %#zx at [%zx,%zx)\n",
2311
arbel, ( 1 << log_num_ees ), arbel->limits.eec_entry_size,
2312
icm_offset, ( icm_offset + len ) );
2315
/* Shared receive queue contexts */
2316
len = ( ( 1 << log_num_srqs ) * arbel->limits.srqc_entry_size );
2317
icm_offset = icm_align ( icm_offset, len );
2318
MLX_FILL_2 ( init_hca, 19,
2319
qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2320
( icm_offset >> 5 ),
2321
qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2323
DBGC ( arbel, "Arbel %p ICM SRQC is %d x %#zx at [%zx,%zx)\n",
2324
arbel, ( 1 << log_num_srqs ), arbel->limits.srqc_entry_size,
2325
icm_offset, ( icm_offset + len ) );
2328
/* Memory protection table */
2329
len = ( ( 1 << log_num_mpts ) * arbel->limits.mpt_entry_size );
2330
icm_offset = icm_align ( icm_offset, len );
2331
MLX_FILL_1 ( init_hca, 61,
2332
tpt_parameters.mpt_base_adr_l, icm_offset );
2333
MLX_FILL_1 ( init_hca, 62,
2334
tpt_parameters.log_mpt_sz, log_num_mpts );
2335
DBGC ( arbel, "Arbel %p ICM MPT is %d x %#zx at [%zx,%zx)\n",
2336
arbel, ( 1 << log_num_mpts ), arbel->limits.mpt_entry_size,
2337
icm_offset, ( icm_offset + len ) );
2340
/* Remote read data base table */
2341
len = ( ( 1 << log_num_rdbs ) * ARBEL_RDB_ENTRY_SIZE );
2342
icm_offset = icm_align ( icm_offset, len );
2343
MLX_FILL_1 ( init_hca, 37,
2344
qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l,
2346
DBGC ( arbel, "Arbel %p ICM RDB is %d x %#zx at [%zx,%zx)\n",
2347
arbel, ( 1 << log_num_rdbs ), ARBEL_RDB_ENTRY_SIZE,
2348
icm_offset, ( icm_offset + len ) );
2351
/* Extended end-to-end contexts */
2352
len = ( ( 1 << log_num_ees ) * arbel->limits.eeec_entry_size );
2353
icm_offset = icm_align ( icm_offset, len );
2354
MLX_FILL_1 ( init_hca, 29,
2355
qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l,
2357
DBGC ( arbel, "Arbel %p ICM EEEC is %d x %#zx at [%zx,%zx)\n",
2358
arbel, ( 1 << log_num_ees ), arbel->limits.eeec_entry_size,
2359
icm_offset, ( icm_offset + len ) );
2362
/* Multicast table */
2363
len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
2364
icm_offset = icm_align ( icm_offset, len );
2365
MLX_FILL_1 ( init_hca, 49,
2366
multicast_parameters.mc_base_addr_l, icm_offset );
2367
MLX_FILL_1 ( init_hca, 52,
2368
multicast_parameters.log_mc_table_entry_sz,
2369
fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
2370
MLX_FILL_1 ( init_hca, 53,
2371
multicast_parameters.mc_table_hash_sz,
2372
( 1 << log_num_mcs ) );
2373
MLX_FILL_1 ( init_hca, 54,
2374
multicast_parameters.log_mc_table_sz,
2375
log_num_mcs /* Only one entry per hash */ );
2376
DBGC ( arbel, "Arbel %p ICM MC is %d x %#zx at [%zx,%zx)\n", arbel,
2377
( 1 << log_num_mcs ), sizeof ( struct arbelprm_mgm_entry ),
2378
icm_offset, ( icm_offset + len ) );
2381
/* Memory translation table */
2382
len = ( ( 1 << log_num_mtts ) * arbel->limits.mtt_entry_size );
2383
icm_offset = icm_align ( icm_offset, len );
2384
MLX_FILL_1 ( init_hca, 65,
2385
tpt_parameters.mtt_base_addr_l, icm_offset );
2386
DBGC ( arbel, "Arbel %p ICM MTT is %d x %#zx at [%zx,%zx)\n",
2387
arbel, ( 1 << log_num_mtts ), arbel->limits.mtt_entry_size,
2388
icm_offset, ( icm_offset + len ) );
2391
/* User access region scratchpads */
2392
len = ( ( 1 << log_num_uars ) * arbel->limits.uar_scratch_entry_size );
2393
icm_offset = icm_align ( icm_offset, len );
2394
MLX_FILL_1 ( init_hca, 77,
2395
uar_parameters.uar_scratch_base_addr_l, icm_offset );
2396
DBGC ( arbel, "Arbel %p UAR scratchpad is %d x %#zx at [%zx,%zx)\n",
2397
arbel, ( 1 << log_num_uars ),
2398
arbel->limits.uar_scratch_entry_size,
2399
icm_offset, ( icm_offset + len ) );
2402
/* Record amount of ICM to be allocated */
2403
icm_offset = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
2404
icm_len = icm_offset;
2406
/* User access region contexts
2408
* The reserved UAR(s) do not need to be backed by physical
2409
* memory, and our UAR is allocated separately; neither are
2410
* part of the umalloc()ed ICM block, but both contribute to
2411
* the total length of ICM virtual address space.
2413
len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
2414
icm_offset = icm_align ( icm_offset, len );
2415
MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
2416
MLX_FILL_1 ( init_hca, 79,
2417
uar_parameters.uar_context_base_addr_l, icm_offset );
2418
arbel->db_rec_offset =
2420
( arbel->limits.reserved_uars * ARBEL_PAGE_SIZE ) );
2421
DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
2422
"[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
2423
icm_offset, ( icm_offset + len ), arbel->db_rec_offset,
2424
( arbel->db_rec_offset + ARBEL_PAGE_SIZE ) );
2427
/* Get ICM auxiliary area size */
2428
memset ( &icm_size, 0, sizeof ( icm_size ) );
2429
MLX_FILL_1 ( &icm_size, 1, value, icm_len );
2430
if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
2431
&icm_aux_size ) ) != 0 ) {
2432
DBGC ( arbel, "Arbel %p could not set ICM size: %s\n",
2433
arbel, strerror ( rc ) );
2434
goto err_set_icm_size;
2436
icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * ARBEL_PAGE_SIZE );
2438
/* Allocate ICM data and auxiliary area */
2439
DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
2440
arbel, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2441
if ( ! arbel->icm ) {
2442
arbel->icm_len = icm_len;
2443
arbel->icm_aux_len = icm_aux_len;
2444
arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
2445
if ( ! arbel->icm ) {
2450
assert ( arbel->icm_len == icm_len );
2451
assert ( arbel->icm_aux_len == icm_aux_len );
2453
icm_phys = user_to_phys ( arbel->icm, 0 );
2455
/* Allocate doorbell UAR */
2456
arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
2457
if ( ! arbel->db_rec ) {
2459
goto err_alloc_doorbell;
2462
/* Map ICM auxiliary area */
2463
DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n",
2464
arbel, icm_phys, ( icm_phys + arbel->icm_aux_len ) );
2465
if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm_aux,
2466
0, icm_phys, arbel->icm_aux_len ) ) != 0 ){
2467
DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n",
2468
arbel, strerror ( rc ) );
2469
goto err_map_icm_aux;
2471
icm_phys += arbel->icm_aux_len;
2474
DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n",
2475
arbel, icm_phys, ( icm_phys + arbel->icm_len ) );
2476
if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
2477
0, icm_phys, arbel->icm_len ) ) != 0 ) {
2478
DBGC ( arbel, "Arbel %p could not map ICM: %s\n",
2479
arbel, strerror ( rc ) );
2482
icm_phys += arbel->icm_len;
2484
/* Map doorbell UAR */
2485
DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n",
2486
arbel, virt_to_phys ( arbel->db_rec ),
2487
( virt_to_phys ( arbel->db_rec ) + ARBEL_PAGE_SIZE ) );
2488
if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
2489
arbel->db_rec_offset,
2490
virt_to_phys ( arbel->db_rec ),
2491
ARBEL_PAGE_SIZE ) ) != 0 ) {
2492
DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
2493
arbel, strerror ( rc ) );
2494
goto err_map_doorbell;
2497
/* Initialise doorbell records */
2498
memset ( arbel->db_rec, 0, ARBEL_PAGE_SIZE );
2499
db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
2500
MLX_FILL_1 ( &db_rec->qp, 1, res, ARBEL_UAR_RES_GROUP_SEP );
2504
memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2505
MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
2506
arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
2508
memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2509
arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
2512
arbel_cmd_unmap_icm_aux ( arbel );
2514
free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
2515
arbel->db_rec= NULL;
2525
* @v arbel Arbel device
2527
static void arbel_free_icm ( struct arbel *arbel ) {
2528
struct arbelprm_scalar_parameter unmap_icm;
2530
memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2531
MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
2532
arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
2533
memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2534
arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
2536
arbel_cmd_unmap_icm_aux ( arbel );
2537
free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
2538
arbel->db_rec = NULL;
2541
/***************************************************************************
2543
* Initialisation and teardown
2545
***************************************************************************
2551
* @v arbel Arbel device
2553
static void arbel_reset ( struct arbel *arbel ) {
2554
struct pci_device *pci = arbel->pci;
2555
struct pci_config_backup backup;
2556
static const uint8_t backup_exclude[] =
2557
PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2561
/* Perform device reset and preserve PCI configuration */
2562
pci_backup ( pci, &backup, backup_exclude );
2563
writel ( ARBEL_RESET_MAGIC,
2564
( arbel->config + ARBEL_RESET_OFFSET ) );
2565
for ( i = 0 ; i < ARBEL_RESET_WAIT_TIME_MS ; i++ ) {
2567
pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor );
2568
if ( vendor != 0xffff )
2571
pci_restore ( pci, &backup, backup_exclude );
2575
* Set up memory protection table
2577
* @v arbel Arbel device
2578
* @ret rc Return status code
2580
static int arbel_setup_mpt ( struct arbel *arbel ) {
2581
struct arbelprm_mpt mpt;
2586
key = ( arbel->limits.reserved_mrws | ARBEL_MKEY_PREFIX );
2587
arbel->lkey = ( ( key << 8 ) | ( key >> 24 ) );
2589
/* Initialise memory protection table */
2590
memset ( &mpt, 0, sizeof ( mpt ) );
2591
MLX_FILL_7 ( &mpt, 0,
2599
MLX_FILL_1 ( &mpt, 2, mem_key, key );
2600
MLX_FILL_2 ( &mpt, 3,
2601
pd, ARBEL_GLOBAL_PD,
2603
MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
2604
MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
2605
if ( ( rc = arbel_cmd_sw2hw_mpt ( arbel, arbel->limits.reserved_mrws,
2607
DBGC ( arbel, "Arbel %p could not set up MPT: %s\n",
2608
arbel, strerror ( rc ) );
2616
* Configure special queue pairs
2618
* @v arbel Arbel device
2619
* @ret rc Return status code
2621
static int arbel_configure_special_qps ( struct arbel *arbel ) {
2622
unsigned int smi_qpn_base;
2623
unsigned int gsi_qpn_base;
2626
/* Special QP block must be aligned on an even number */
2627
arbel->special_qpn_base = ( ( arbel->limits.reserved_qps + 1 ) & ~1 );
2628
arbel->qpn_base = ( arbel->special_qpn_base +
2629
ARBEL_NUM_SPECIAL_QPS );
2630
DBGC ( arbel, "Arbel %p special QPs at [%lx,%lx]\n", arbel,
2631
arbel->special_qpn_base, ( arbel->qpn_base - 1 ) );
2632
smi_qpn_base = arbel->special_qpn_base;
2633
gsi_qpn_base = ( smi_qpn_base + 2 );
2635
/* Issue commands to configure special QPs */
2636
if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 0,
2637
smi_qpn_base ) ) != 0 ) {
2638
DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
2639
arbel, strerror ( rc ) );
2642
if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 1,
2643
gsi_qpn_base ) ) != 0 ) {
2644
DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
2645
arbel, strerror ( rc ) );
2653
* Start Arbel device
2655
* @v arbel Arbel device
2656
* @v running Firmware is already running
2657
* @ret rc Return status code
2659
static int arbel_start ( struct arbel *arbel, int running ) {
2660
struct arbelprm_init_hca init_hca;
2664
/* Start firmware if not already running */
2666
if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
2667
goto err_start_firmware;
2671
memset ( &init_hca, 0, sizeof ( init_hca ) );
2672
if ( ( rc = arbel_alloc_icm ( arbel, &init_hca ) ) != 0 )
2675
/* Initialise HCA */
2676
if ( ( rc = arbel_cmd_init_hca ( arbel, &init_hca ) ) != 0 ) {
2677
DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n",
2678
arbel, strerror ( rc ) );
2682
/* Set up memory protection */
2683
if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
2685
for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
2686
arbel->ibdev[i]->rdma_key = arbel->lkey;
2688
/* Set up event queue */
2689
if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
2692
/* Configure special QPs */
2693
if ( ( rc = arbel_configure_special_qps ( arbel ) ) != 0 )
2694
goto err_conf_special_qps;
2698
err_conf_special_qps:
2699
arbel_destroy_eq ( arbel );
2702
arbel_cmd_close_hca ( arbel );
2704
arbel_free_icm ( arbel );
2706
arbel_stop_firmware ( arbel );
2714
/**
 * Stop Arbel device
 *
 * @v arbel		Arbel device
 */
static void arbel_stop ( struct arbel *arbel ) {
	/* Tear down in reverse order of arbel_start(), then reset the
	 * device so that a subsequent start finds it quiescent.
	 */
	arbel_destroy_eq ( arbel );
	arbel_cmd_close_hca ( arbel );
	arbel_free_icm ( arbel );
	arbel_stop_firmware ( arbel );
	arbel_reset ( arbel );
}
* @v arbel Arbel device
2728
* @ret rc Return status code
2730
static int arbel_open ( struct arbel *arbel ) {
2733
/* Start device if applicable */
2734
if ( arbel->open_count == 0 ) {
2735
if ( ( rc = arbel_start ( arbel, 0 ) ) != 0 )
2739
/* Increment open counter */
2740
arbel->open_count++;
2746
* Close Arbel device
2748
* @v arbel Arbel device
2750
static void arbel_close ( struct arbel *arbel ) {
2752
/* Decrement open counter */
2753
assert ( arbel->open_count != 0 );
2754
arbel->open_count--;
2756
/* Stop device if applicable */
2757
if ( arbel->open_count == 0 )
2758
arbel_stop ( arbel );
2761
/***************************************************************************
2763
* Infiniband link-layer operations
2765
***************************************************************************
2769
* Initialise Infiniband link
2771
* @v ibdev Infiniband device
2772
* @ret rc Return status code
2774
static int arbel_ib_open ( struct ib_device *ibdev ) {
2775
struct arbel *arbel = ib_get_drvdata ( ibdev );
2776
struct arbelprm_init_ib init_ib;
2780
if ( ( rc = arbel_open ( arbel ) ) != 0 )
2784
memset ( &init_ib, 0, sizeof ( init_ib ) );
2785
MLX_FILL_3 ( &init_ib, 0,
2786
mtu_cap, ARBEL_MTU_2048,
2789
MLX_FILL_1 ( &init_ib, 1, max_gid, 1 );
2790
MLX_FILL_1 ( &init_ib, 2, max_pkey, 64 );
2791
if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
2792
&init_ib ) ) != 0 ) {
2793
DBGC ( arbel, "Arbel %p port %d could not intialise IB: %s\n",
2794
arbel, ibdev->port, strerror ( rc ) );
2798
/* Update MAD parameters */
2799
ib_smc_update ( ibdev, arbel_mad );
2804
arbel_close ( arbel );
2810
* Close Infiniband link
2812
* @v ibdev Infiniband device
2814
static void arbel_ib_close ( struct ib_device *ibdev ) {
2815
struct arbel *arbel = ib_get_drvdata ( ibdev );
2819
if ( ( rc = arbel_cmd_close_ib ( arbel, ibdev->port ) ) != 0 ) {
2820
DBGC ( arbel, "Arbel %p port %d could not close IB: %s\n",
2821
arbel, ibdev->port, strerror ( rc ) );
2822
/* Nothing we can do about this */
2825
/* Close hardware */
2826
arbel_close ( arbel );
2830
* Inform embedded subnet management agent of a received MAD
2832
* @v ibdev Infiniband device
2834
* @ret rc Return status code
2836
static int arbel_inform_sma ( struct ib_device *ibdev, union ib_mad *mad ) {
2839
/* Send the MAD to the embedded SMA */
2840
if ( ( rc = arbel_mad ( ibdev, mad ) ) != 0 )
2843
/* Update parameters held in software */
2844
ib_smc_update ( ibdev, arbel_mad );
2849
/***************************************************************************
2851
* Multicast group operations
2853
***************************************************************************
2857
* Attach to multicast group
2859
* @v ibdev Infiniband device
2861
* @v gid Multicast GID
2862
* @ret rc Return status code
2864
static int arbel_mcast_attach ( struct ib_device *ibdev,
2865
struct ib_queue_pair *qp,
2866
union ib_gid *gid ) {
2867
struct arbel *arbel = ib_get_drvdata ( ibdev );
2868
struct arbelprm_mgm_hash hash;
2869
struct arbelprm_mgm_entry mgm;
2873
/* Generate hash table index */
2874
if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
2875
DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
2876
arbel, strerror ( rc ) );
2879
index = MLX_GET ( &hash, hash );
2881
/* Check for existing hash table entry */
2882
if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
2883
DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
2884
arbel, index, strerror ( rc ) );
2887
if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
2888
/* FIXME: this implementation allows only a single QP
2889
* per multicast group, and doesn't handle hash
2890
* collisions. Sufficient for IPoIB but may need to
2891
* be extended in future.
2893
DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
2898
/* Update hash table entry */
2899
MLX_FILL_2 ( &mgm, 8,
2900
mgmqp_0.qpn_i, qp->qpn,
2902
memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
2903
if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
2904
DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
2905
arbel, index, strerror ( rc ) );
2913
* Detach from multicast group
2915
* @v ibdev Infiniband device
2917
* @v gid Multicast GID
2919
static void arbel_mcast_detach ( struct ib_device *ibdev,
2920
struct ib_queue_pair *qp __unused,
2921
union ib_gid *gid ) {
2922
struct arbel *arbel = ib_get_drvdata ( ibdev );
2923
struct arbelprm_mgm_hash hash;
2924
struct arbelprm_mgm_entry mgm;
2928
/* Generate hash table index */
2929
if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
2930
DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
2931
arbel, strerror ( rc ) );
2934
index = MLX_GET ( &hash, hash );
2936
/* Clear hash table entry */
2937
memset ( &mgm, 0, sizeof ( mgm ) );
2938
if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
2939
DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
2940
arbel, index, strerror ( rc ) );
2945
/** Arbel Infiniband operations */
2946
static struct ib_device_operations arbel_ib_operations = {
2947
.create_cq = arbel_create_cq,
2948
.destroy_cq = arbel_destroy_cq,
2949
.create_qp = arbel_create_qp,
2950
.modify_qp = arbel_modify_qp,
2951
.destroy_qp = arbel_destroy_qp,
2952
.post_send = arbel_post_send,
2953
.post_recv = arbel_post_recv,
2954
.poll_cq = arbel_poll_cq,
2955
.poll_eq = arbel_poll_eq,
2956
.open = arbel_ib_open,
2957
.close = arbel_ib_close,
2958
.mcast_attach = arbel_mcast_attach,
2959
.mcast_detach = arbel_mcast_detach,
2960
.set_port_info = arbel_inform_sma,
2961
.set_pkey_table = arbel_inform_sma,
2964
/***************************************************************************
2968
***************************************************************************
2972
* Allocate Arbel device
2974
* @ret arbel Arbel device
2976
static struct arbel * arbel_alloc ( void ) {
2977
struct arbel *arbel;
2979
/* Allocate Arbel device */
2980
arbel = zalloc ( sizeof ( *arbel ) );
2984
/* Allocate space for mailboxes */
2985
arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
2986
if ( ! arbel->mailbox_in )
2987
goto err_mailbox_in;
2988
arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
2989
if ( ! arbel->mailbox_out )
2990
goto err_mailbox_out;
2994
free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
2996
free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
3006
* @v arbel Arbel device
3008
static void arbel_free ( struct arbel *arbel ) {
3010
ufree ( arbel->icm );
3011
ufree ( arbel->firmware_area );
3012
free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
3013
free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
3022
* @ret rc Return status code
3024
static int arbel_probe ( struct pci_device *pci ) {
3025
struct arbel *arbel;
3026
struct ib_device *ibdev;
3030
/* Allocate Arbel device */
3031
arbel = arbel_alloc();
3036
pci_set_drvdata ( pci, arbel );
3039
/* Fix up PCI device */
3040
adjust_pci_device ( pci );
3043
arbel->config = ioremap ( pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ),
3044
ARBEL_PCI_CONFIG_BAR_SIZE );
3045
arbel->uar = ioremap ( ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) +
3046
ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ),
3047
ARBEL_PCI_UAR_SIZE );
3049
/* Allocate Infiniband devices */
3050
for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
3051
ibdev = alloc_ibdev ( 0 );
3054
goto err_alloc_ibdev;
3056
arbel->ibdev[i] = ibdev;
3057
ibdev->op = &arbel_ib_operations;
3058
ibdev->dev = &pci->dev;
3059
ibdev->port = ( ARBEL_PORT_BASE + i );
3060
ib_set_drvdata ( ibdev, arbel );
3064
arbel_reset ( arbel );
3066
/* Start firmware */
3067
if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
3068
goto err_start_firmware;
3070
/* Get device limits */
3071
if ( ( rc = arbel_get_limits ( arbel ) ) != 0 )
3072
goto err_get_limits;
3075
if ( ( rc = arbel_start ( arbel, 1 ) ) != 0 )
3078
/* Initialise parameters using SMC */
3079
for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
3080
ib_smc_init ( arbel->ibdev[i], arbel_mad );
3082
/* Register Infiniband devices */
3083
for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
3084
if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
3085
DBGC ( arbel, "Arbel %p port %d could not register IB "
3086
"device: %s\n", arbel,
3087
arbel->ibdev[i]->port, strerror ( rc ) );
3088
goto err_register_ibdev;
3092
/* Leave device quiescent until opened */
3093
if ( arbel->open_count == 0 )
3094
arbel_stop ( arbel );
3098
i = ARBEL_NUM_PORTS;
3100
for ( i-- ; i >= 0 ; i-- )
3101
unregister_ibdev ( arbel->ibdev[i] );
3102
arbel_stop ( arbel );
3105
arbel_stop_firmware ( arbel );
3107
i = ARBEL_NUM_PORTS;
3109
for ( i-- ; i >= 0 ; i-- )
3110
ibdev_put ( arbel->ibdev[i] );
3111
iounmap ( arbel->uar );
3112
iounmap ( arbel->config );
3113
arbel_free ( arbel );
3123
static void arbel_remove ( struct pci_device *pci ) {
3124
struct arbel *arbel = pci_get_drvdata ( pci );
3127
for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
3128
unregister_ibdev ( arbel->ibdev[i] );
3129
for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
3130
ibdev_put ( arbel->ibdev[i] );
3131
iounmap ( arbel->uar );
3132
iounmap ( arbel->config );
3133
arbel_free ( arbel );
3136
/** Arbel PCI device IDs */
static struct pci_device_id arbel_nics[] = {
	PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver", 0 ),
};
struct pci_driver arbel_driver __pci_driver = {
3143
.id_count = ( sizeof ( arbel_nics ) / sizeof ( arbel_nics[0] ) ),
3144
.probe = arbel_probe,
3145
.remove = arbel_remove,