2
* Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
4
* This program is free software; you can redistribute it and/or
5
* modify it under the terms of the GNU General Public License as
6
* published by the Free Software Foundation; either version 2 of the
7
* License, or any later version.
9
* This program is distributed in the hope that it will be useful, but
10
* WITHOUT ANY WARRANTY; without even the implied warranty of
11
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12
* General Public License for more details.
14
* You should have received a copy of the GNU General Public License
15
* along with this program; if not, write to the Free Software
16
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
* You can also choose to distribute this program under the terms of
20
* the Unmodified Binary Distribution Licence (as given in the file
21
* COPYING.UBDL), provided that you have satisfied its requirements.
24
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
33
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <ipxe/io.h>
#include <ipxe/i2c.h>
#include <ipxe/infiniband.h>
#include <ipxe/bitbash.h>
#include <ipxe/malloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/pcibackup.h>
#include "qib7322.h"
44
* QLogic QIB7322 Infiniband HCA
48
/** A QIB7322 send buffer set */
struct qib7322_send_buffers {
	/** Offset within register space of the first send buffer */
	unsigned long base;
	/** Send buffer size */
	unsigned int size;
	/** Index of first send buffer */
	unsigned int start;
	/** Number of send buffers
	 *
	 * Must be a power of two.
	 */
	unsigned int count;
	/** Send buffer availability producer counter */
	uint16_t prod;
	/** Send buffer availability consumer counter */
	uint16_t cons;
	/** Send buffer availability */
	uint16_t avail[0];
};
69
/** A QIB7322 send work queue */
struct qib7322_send_work_queue {
	/** Send buffer set */
	struct qib7322_send_buffers *send_bufs;
	/** Send buffer usage */
	uint16_t *used;
	/** Producer index */
	unsigned int prod;
	/** Consumer index */
	unsigned int cons;
};
81
/** A QIB7322 receive work queue */
82
struct qib7322_recv_work_queue {
83
/** Receive header ring */
85
/** Receive header producer offset (written by hardware) */
86
struct QIB_7322_scalar header_prod;
87
/** Receive header consumer offset */
88
unsigned int header_cons;
89
/** Offset within register space of the eager array */
90
unsigned long eager_array;
91
/** Number of entries in eager array */
92
unsigned int eager_entries;
93
/** Eager array producer index */
94
unsigned int eager_prod;
95
/** Eager array consumer index */
96
unsigned int eager_cons;
104
/** In-use contexts */
105
uint8_t used_ctx[QIB7322_NUM_CONTEXTS];
106
/** Send work queues */
107
struct qib7322_send_work_queue send_wq[QIB7322_NUM_CONTEXTS];
108
/** Receive work queues */
109
struct qib7322_recv_work_queue recv_wq[QIB7322_NUM_CONTEXTS];
111
/** Send buffer availability (reported by hardware) */
112
struct QIB_7322_SendBufAvail *sendbufavail;
113
/** Small send buffers */
114
struct qib7322_send_buffers *send_bufs_small;
115
/** VL15 port 0 send buffers */
116
struct qib7322_send_buffers *send_bufs_vl15_port0;
117
/** VL15 port 1 send buffers */
118
struct qib7322_send_buffers *send_bufs_vl15_port1;
120
/** I2C bit-bashing interface */
121
struct i2c_bit_basher i2c;
122
/** I2C serial EEPROM */
123
struct i2c_device eeprom;
127
/** Infiniband devices */
128
struct ib_device *ibdev[QIB7322_MAX_PORTS];
131
/***************************************************************************
133
* QIB7322 register access
135
***************************************************************************
137
* This card requires atomic 64-bit accesses. Strange things happen
138
* if you try to use 32-bit accesses; sometimes they work, sometimes
139
* they don't, sometimes you get random data.
143
* Read QIB7322 qword register
145
* @v qib7322 QIB7322 device
146
* @v qword Register buffer to read into
147
* @v offset Register offset
149
static void qib7322_readq ( struct qib7322 *qib7322, uint64_t *qword,
150
unsigned long offset ) {
151
*qword = readq ( qib7322->regs + offset );
153
#define qib7322_readq( _qib7322, _ptr, _offset ) \
154
qib7322_readq ( (_qib7322), (_ptr)->u.qwords, (_offset) )
155
#define qib7322_readq_array8b( _qib7322, _ptr, _offset, _idx ) \
156
qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
157
#define qib7322_readq_array64k( _qib7322, _ptr, _offset, _idx ) \
158
qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
159
#define qib7322_readq_port( _qib7322, _ptr, _offset, _port ) \
160
qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ) )
163
* Write QIB7322 qword register
165
* @v qib7322 QIB7322 device
166
* @v qword Register buffer to write
167
* @v offset Register offset
169
static void qib7322_writeq ( struct qib7322 *qib7322, const uint64_t *qword,
170
unsigned long offset ) {
171
writeq ( *qword, ( qib7322->regs + offset ) );
173
#define qib7322_writeq( _qib7322, _ptr, _offset ) \
174
qib7322_writeq ( (_qib7322), (_ptr)->u.qwords, (_offset) )
175
#define qib7322_writeq_array8b( _qib7322, _ptr, _offset, _idx ) \
176
qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
177
#define qib7322_writeq_array64k( _qib7322, _ptr, _offset, _idx ) \
178
qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ))
179
#define qib7322_writeq_port( _qib7322, _ptr, _offset, _port ) \
180
qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ))
183
* Write QIB7322 dword register
185
* @v qib7322 QIB7322 device
186
* @v dword Value to write
187
* @v offset Register offset
189
static void qib7322_writel ( struct qib7322 *qib7322, uint32_t dword,
190
unsigned long offset ) {
191
writel ( dword, ( qib7322->regs + offset ) );
194
/***************************************************************************
196
* Link state management
198
***************************************************************************
202
* Textual representation of link state
204
* @v link_state Link state
205
* @ret link_text Link state text
207
static const char * qib7322_link_state_text ( unsigned int link_state ) {
208
switch ( link_state ) {
209
case QIB7322_LINK_STATE_DOWN: return "DOWN";
210
case QIB7322_LINK_STATE_INIT: return "INIT";
211
case QIB7322_LINK_STATE_ARM: return "ARM";
212
case QIB7322_LINK_STATE_ACTIVE: return "ACTIVE";
213
case QIB7322_LINK_STATE_ACT_DEFER: return "ACT_DEFER";
214
default: return "UNKNOWN";
219
* Handle link state change
221
* @v qib7322 QIB7322 device
223
static void qib7322_link_state_changed ( struct ib_device *ibdev ) {
224
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
225
struct QIB_7322_IBCStatusA_0 ibcstatusa;
226
struct QIB_7322_EXTCtrl extctrl;
227
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
228
unsigned int link_training_state;
229
unsigned int link_state;
230
unsigned int link_width;
231
unsigned int link_speed;
232
unsigned int link_speed_qdr;
236
/* Read link state */
237
qib7322_readq_port ( qib7322, &ibcstatusa,
238
QIB_7322_IBCStatusA_0_offset, port );
239
link_training_state = BIT_GET ( &ibcstatusa, LinkTrainingState );
240
link_state = BIT_GET ( &ibcstatusa, LinkState );
241
link_width = BIT_GET ( &ibcstatusa, LinkWidthActive );
242
link_speed = BIT_GET ( &ibcstatusa, LinkSpeedActive );
243
link_speed_qdr = BIT_GET ( &ibcstatusa, LinkSpeedQDR );
244
DBGC ( qib7322, "QIB7322 %p port %d training state %#x link state %s "
245
"(%s %s)\n", qib7322, port, link_training_state,
246
qib7322_link_state_text ( link_state ),
247
( link_speed_qdr ? "QDR" : ( link_speed ? "DDR" : "SDR" ) ),
248
( link_width ? "x4" : "x1" ) );
250
/* Set LEDs according to link state */
251
qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
252
green = ( ( link_state >= QIB7322_LINK_STATE_INIT ) ? 1 : 0 );
253
yellow = ( ( link_state >= QIB7322_LINK_STATE_ACTIVE ) ? 1 : 0 );
255
BIT_SET ( &extctrl, LEDPort0GreenOn, green );
256
BIT_SET ( &extctrl, LEDPort0YellowOn, yellow );
258
BIT_SET ( &extctrl, LEDPort1GreenOn, green );
259
BIT_SET ( &extctrl, LEDPort1YellowOn, yellow );
261
qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
263
/* Notify Infiniband core of link state change */
264
ibdev->port_state = ( link_state + 1 );
265
ibdev->link_width_active =
266
( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
267
ibdev->link_speed_active =
268
( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
269
ib_link_state_changed ( ibdev );
273
* Wait for link state change to take effect
275
* @v ibdev Infiniband device
276
* @v new_link_state Expected link state
277
* @ret rc Return status code
279
static int qib7322_link_state_check ( struct ib_device *ibdev,
280
unsigned int new_link_state ) {
281
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
282
struct QIB_7322_IBCStatusA_0 ibcstatusa;
283
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
284
unsigned int link_state;
287
for ( i = 0 ; i < QIB7322_LINK_STATE_MAX_WAIT_US ; i++ ) {
288
qib7322_readq_port ( qib7322, &ibcstatusa,
289
QIB_7322_IBCStatusA_0_offset, port );
290
link_state = BIT_GET ( &ibcstatusa, LinkState );
291
if ( link_state == new_link_state )
296
DBGC ( qib7322, "QIB7322 %p port %d timed out waiting for link state "
297
"%s\n", qib7322, port, qib7322_link_state_text ( link_state ) );
302
* Set port information
304
* @v ibdev Infiniband device
305
* @v mad Set port information MAD
307
static int qib7322_set_port_info ( struct ib_device *ibdev,
308
union ib_mad *mad ) {
309
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
310
struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
311
struct QIB_7322_IBCCtrlA_0 ibcctrla;
312
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
313
unsigned int port_state;
314
unsigned int link_state;
316
/* Set new link state */
317
port_state = ( port_info->link_speed_supported__port_state & 0xf );
319
link_state = ( port_state - 1 );
320
DBGC ( qib7322, "QIB7322 %p set link state to %s (%x)\n",
321
qib7322, qib7322_link_state_text ( link_state ),
323
qib7322_readq_port ( qib7322, &ibcctrla,
324
QIB_7322_IBCCtrlA_0_offset, port );
325
BIT_SET ( &ibcctrla, LinkCmd, link_state );
326
qib7322_writeq_port ( qib7322, &ibcctrla,
327
QIB_7322_IBCCtrlA_0_offset, port );
329
/* Wait for link state change to take effect. Ignore
330
* errors; the current link state will be returned via
331
* the GetResponse MAD.
333
qib7322_link_state_check ( ibdev, link_state );
336
/* Detect and report link state change */
337
qib7322_link_state_changed ( ibdev );
343
* Set partition key table
345
* @v ibdev Infiniband device
346
* @v mad Set partition key table MAD
348
static int qib7322_set_pkey_table ( struct ib_device *ibdev __unused,
349
union ib_mad *mad __unused ) {
354
/***************************************************************************
358
***************************************************************************
362
* Allocate a context and set queue pair number
364
* @v ibdev Infiniband device
366
* @ret rc Return status code
368
static int qib7322_alloc_ctx ( struct ib_device *ibdev,
369
struct ib_queue_pair *qp ) {
370
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
371
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
374
for ( ctx = port ; ctx < QIB7322_NUM_CONTEXTS ; ctx += 2 ) {
376
if ( ! qib7322->used_ctx[ctx] ) {
377
qib7322->used_ctx[ctx] = 1;
378
qp->qpn = ( ctx & ~0x01 );
379
DBGC2 ( qib7322, "QIB7322 %p port %d QPN %ld is CTX "
380
"%d\n", qib7322, port, qp->qpn, ctx );
385
DBGC ( qib7322, "QIB7322 %p port %d out of available contexts\n",
391
* Get queue pair context number
393
* @v ibdev Infiniband device
395
* @ret ctx Context index
397
static unsigned int qib7322_ctx ( struct ib_device *ibdev,
398
struct ib_queue_pair *qp ) {
399
return ( qp->qpn + ( ibdev->port - QIB7322_PORT_BASE ) );
405
* @v qib7322 QIB7322 device
406
* @v ctx Context index
408
static void qib7322_free_ctx ( struct ib_device *ibdev,
409
struct ib_queue_pair *qp ) {
410
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
411
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
412
unsigned int ctx = qib7322_ctx ( ibdev, qp );
414
qib7322->used_ctx[ctx] = 0;
415
DBGC2 ( qib7322, "QIB7322 %p port %d CTX %d freed\n",
416
qib7322, port, ctx );
419
/***************************************************************************
423
***************************************************************************
426
/** Send buffer toggle bit
 *
 * We encode send buffers as 15 bits of send buffer index plus a
 * single bit which should match the "check" bit in the SendBufAvail
 * array.
 */
#define QIB7322_SEND_BUF_TOGGLE 0x8000
435
* Create send buffer set
437
* @v qib7322 QIB7322 device
438
* @v base Send buffer base offset
439
* @v size Send buffer size
440
* @v start Index of first send buffer
441
* @v count Number of send buffers
442
* @ret send_bufs Send buffer set
444
static struct qib7322_send_buffers *
445
qib7322_create_send_bufs ( struct qib7322 *qib7322, unsigned long base,
446
unsigned int size, unsigned int start,
447
unsigned int count ) {
448
struct qib7322_send_buffers *send_bufs;
451
/* Allocate send buffer set */
452
send_bufs = zalloc ( sizeof ( *send_bufs ) +
453
( count * sizeof ( send_bufs->avail[0] ) ) );
457
/* Populate send buffer set */
458
send_bufs->base = base;
459
send_bufs->size = size;
460
send_bufs->start = start;
461
send_bufs->count = count;
462
for ( i = 0 ; i < count ; i++ )
463
send_bufs->avail[i] = ( start + i );
465
DBGC2 ( qib7322, "QIB7322 %p send buffer set %p [%d,%d] at %lx\n",
466
qib7322, send_bufs, start, ( start + count - 1 ),
473
* Destroy send buffer set
475
* @v qib7322 QIB7322 device
476
* @v send_bufs Send buffer set
479
qib7322_destroy_send_bufs ( struct qib7322 *qib7322 __unused,
480
struct qib7322_send_buffers *send_bufs ) {
485
* Allocate a send buffer
487
* @v qib7322 QIB7322 device
488
* @v send_bufs Send buffer set
489
* @ret send_buf Send buffer, or negative error
491
static int qib7322_alloc_send_buf ( struct qib7322 *qib7322,
492
struct qib7322_send_buffers *send_bufs ) {
495
unsigned int send_buf;
497
used = ( send_bufs->cons - send_bufs->prod );
498
if ( used >= send_bufs->count ) {
499
DBGC ( qib7322, "QIB7322 %p send buffer set %p out of "
500
"buffers\n", qib7322, send_bufs );
504
mask = ( send_bufs->count - 1 );
505
send_buf = send_bufs->avail[ send_bufs->cons++ & mask ];
506
send_buf ^= QIB7322_SEND_BUF_TOGGLE;
513
* @v qib7322 QIB7322 device
514
* @v send_bufs Send buffer set
515
* @v send_buf Send buffer
517
static void qib7322_free_send_buf ( struct qib7322 *qib7322 __unused,
518
struct qib7322_send_buffers *send_bufs,
519
unsigned int send_buf ) {
522
mask = ( send_bufs->count - 1 );
523
send_bufs->avail[ send_bufs->prod++ & mask ] = send_buf;
527
* Check to see if send buffer is in use
529
* @v qib7322 QIB7322 device
530
* @v send_buf Send buffer
531
* @ret in_use Send buffer is in use
533
static int qib7322_send_buf_in_use ( struct qib7322 *qib7322,
534
unsigned int send_buf ) {
535
unsigned int send_idx;
536
unsigned int send_check;
537
unsigned int inusecheck;
541
send_idx = ( send_buf & ~QIB7322_SEND_BUF_TOGGLE );
542
send_check = ( !! ( send_buf & QIB7322_SEND_BUF_TOGGLE ) );
543
inusecheck = BIT_GET ( qib7322->sendbufavail, InUseCheck[send_idx] );
544
inuse = ( !! ( inusecheck & 0x02 ) );
545
check = ( !! ( inusecheck & 0x01 ) );
546
return ( inuse || ( check != send_check ) );
550
* Calculate starting offset for send buffer
552
* @v qib7322 QIB7322 device
553
* @v send_buf Send buffer
554
* @ret offset Starting offset
557
qib7322_send_buffer_offset ( struct qib7322 *qib7322 __unused,
558
struct qib7322_send_buffers *send_bufs,
559
unsigned int send_buf ) {
562
index = ( ( send_buf & ~QIB7322_SEND_BUF_TOGGLE ) - send_bufs->start );
563
return ( send_bufs->base + ( index * send_bufs->size ) );
567
* Create send work queue
569
* @v ibdev Infiniband device
572
static int qib7322_create_send_wq ( struct ib_device *ibdev,
573
struct ib_queue_pair *qp ) {
574
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
575
struct ib_work_queue *wq = &qp->send;
576
struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
577
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
579
/* Select send buffer set */
580
if ( qp->type == IB_QPT_SMI ) {
582
qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port0;
584
qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port1;
587
qib7322_wq->send_bufs = qib7322->send_bufs_small;
590
/* Allocate space for send buffer usage list */
591
qib7322_wq->used = zalloc ( qp->send.num_wqes *
592
sizeof ( qib7322_wq->used[0] ) );
593
if ( ! qib7322_wq->used )
596
/* Reset work queue */
597
qib7322_wq->prod = 0;
598
qib7322_wq->cons = 0;
604
* Destroy send work queue
606
* @v ibdev Infiniband device
609
static void qib7322_destroy_send_wq ( struct ib_device *ibdev __unused,
610
struct ib_queue_pair *qp ) {
611
struct ib_work_queue *wq = &qp->send;
612
struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
614
free ( qib7322_wq->used );
618
* Initialise send datapath
620
* @v qib7322 QIB7322 device
621
* @ret rc Return status code
623
static int qib7322_init_send ( struct qib7322 *qib7322 ) {
624
struct QIB_7322_SendBufBase sendbufbase;
625
struct QIB_7322_SendBufAvailAddr sendbufavailaddr;
626
struct QIB_7322_SendCtrl sendctrl;
627
struct QIB_7322_SendCtrl_0 sendctrlp;
628
unsigned long baseaddr_smallpio;
629
unsigned long baseaddr_largepio;
630
unsigned long baseaddr_vl15_port0;
631
unsigned long baseaddr_vl15_port1;
634
/* Create send buffer sets */
635
qib7322_readq ( qib7322, &sendbufbase, QIB_7322_SendBufBase_offset );
636
baseaddr_smallpio = BIT_GET ( &sendbufbase, BaseAddr_SmallPIO );
637
baseaddr_largepio = BIT_GET ( &sendbufbase, BaseAddr_LargePIO );
638
baseaddr_vl15_port0 = ( baseaddr_largepio +
639
( QIB7322_LARGE_SEND_BUF_SIZE *
640
QIB7322_LARGE_SEND_BUF_COUNT ) );
641
baseaddr_vl15_port1 = ( baseaddr_vl15_port0 +
642
QIB7322_VL15_PORT0_SEND_BUF_SIZE );
643
qib7322->send_bufs_small =
644
qib7322_create_send_bufs ( qib7322, baseaddr_smallpio,
645
QIB7322_SMALL_SEND_BUF_SIZE,
646
QIB7322_SMALL_SEND_BUF_START,
647
QIB7322_SMALL_SEND_BUF_USED );
648
if ( ! qib7322->send_bufs_small ) {
650
goto err_create_send_bufs_small;
652
qib7322->send_bufs_vl15_port0 =
653
qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port0,
654
QIB7322_VL15_PORT0_SEND_BUF_SIZE,
655
QIB7322_VL15_PORT0_SEND_BUF_START,
656
QIB7322_VL15_PORT0_SEND_BUF_COUNT );
657
if ( ! qib7322->send_bufs_vl15_port0 ) {
659
goto err_create_send_bufs_vl15_port0;
661
qib7322->send_bufs_vl15_port1 =
662
qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port1,
663
QIB7322_VL15_PORT1_SEND_BUF_SIZE,
664
QIB7322_VL15_PORT1_SEND_BUF_START,
665
QIB7322_VL15_PORT1_SEND_BUF_COUNT );
666
if ( ! qib7322->send_bufs_vl15_port1 ) {
668
goto err_create_send_bufs_vl15_port1;
671
/* Allocate space for the SendBufAvail array */
672
qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
673
QIB7322_SENDBUFAVAIL_ALIGN );
674
if ( ! qib7322->sendbufavail ) {
676
goto err_alloc_sendbufavail;
678
memset ( qib7322->sendbufavail, 0, sizeof ( qib7322->sendbufavail ) );
680
/* Program SendBufAvailAddr into the hardware */
681
memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
682
BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
683
( virt_to_bus ( qib7322->sendbufavail ) >> 6 ) );
684
qib7322_writeq ( qib7322, &sendbufavailaddr,
685
QIB_7322_SendBufAvailAddr_offset );
688
memset ( &sendctrlp, 0, sizeof ( sendctrlp ) );
689
BIT_FILL_1 ( &sendctrlp, SendEnable, 1 );
690
qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_0_offset );
691
qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_1_offset );
693
/* Enable DMA of SendBufAvail */
694
memset ( &sendctrl, 0, sizeof ( sendctrl ) );
695
BIT_FILL_1 ( &sendctrl, SendBufAvailUpd, 1 );
696
qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
700
free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
701
err_alloc_sendbufavail:
702
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
703
err_create_send_bufs_vl15_port1:
704
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
705
err_create_send_bufs_vl15_port0:
706
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
707
err_create_send_bufs_small:
712
* Shut down send datapath
714
* @v qib7322 QIB7322 device
716
static void qib7322_fini_send ( struct qib7322 *qib7322 ) {
717
struct QIB_7322_SendCtrl sendctrl;
719
/* Disable sending and DMA of SendBufAvail */
720
memset ( &sendctrl, 0, sizeof ( sendctrl ) );
721
qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
724
/* Ensure hardware has seen this disable */
725
qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
727
free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
728
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
729
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
730
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
733
/***************************************************************************
737
***************************************************************************
741
* Create receive work queue
743
* @v ibdev Infiniband device
745
* @ret rc Return status code
747
static int qib7322_create_recv_wq ( struct ib_device *ibdev,
748
struct ib_queue_pair *qp ) {
749
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
750
struct ib_work_queue *wq = &qp->recv;
751
struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
752
struct QIB_7322_RcvHdrAddr0 rcvhdraddr;
753
struct QIB_7322_RcvHdrTailAddr0 rcvhdrtailaddr;
754
struct QIB_7322_RcvHdrHead0 rcvhdrhead;
755
struct QIB_7322_scalar rcvegrindexhead;
756
struct QIB_7322_RcvCtrl rcvctrl;
757
struct QIB_7322_RcvCtrl_P rcvctrlp;
758
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
759
unsigned int ctx = qib7322_ctx ( ibdev, qp );
762
/* Reset context information */
763
memset ( &qib7322_wq->header_prod, 0,
764
sizeof ( qib7322_wq->header_prod ) );
765
qib7322_wq->header_cons = 0;
766
qib7322_wq->eager_prod = 0;
767
qib7322_wq->eager_cons = 0;
769
/* Allocate receive header buffer */
770
qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
771
QIB7322_RECV_HEADERS_ALIGN );
772
if ( ! qib7322_wq->header ) {
774
goto err_alloc_header;
777
/* Enable context in hardware */
778
memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
779
BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr,
780
( virt_to_bus ( qib7322_wq->header ) >> 2 ) );
781
qib7322_writeq_array8b ( qib7322, &rcvhdraddr,
782
QIB_7322_RcvHdrAddr0_offset, ctx );
783
memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
784
BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr,
785
( virt_to_bus ( &qib7322_wq->header_prod ) >> 2 ) );
786
qib7322_writeq_array8b ( qib7322, &rcvhdrtailaddr,
787
QIB_7322_RcvHdrTailAddr0_offset, ctx );
788
memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
789
BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
790
qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
791
QIB_7322_RcvHdrHead0_offset, ctx );
792
memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
793
BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
794
qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
795
QIB_7322_RcvEgrIndexHead0_offset, ctx );
796
qib7322_readq_port ( qib7322, &rcvctrlp,
797
QIB_7322_RcvCtrl_0_offset, port );
798
BIT_SET ( &rcvctrlp, ContextEnable[ctx], 1 );
799
qib7322_writeq_port ( qib7322, &rcvctrlp,
800
QIB_7322_RcvCtrl_0_offset, port );
801
qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
802
BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
803
qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
805
DBGC ( qib7322, "QIB7322 %p port %d QPN %ld CTX %d hdrs [%lx,%lx) prod "
806
"%lx\n", qib7322, port, qp->qpn, ctx,
807
virt_to_bus ( qib7322_wq->header ),
808
( virt_to_bus ( qib7322_wq->header )
809
+ QIB7322_RECV_HEADERS_SIZE ),
810
virt_to_bus ( &qib7322_wq->header_prod ) );
813
free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
819
* Destroy receive work queue
821
* @v ibdev Infiniband device
824
static void qib7322_destroy_recv_wq ( struct ib_device *ibdev,
825
struct ib_queue_pair *qp ) {
826
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
827
struct ib_work_queue *wq = &qp->recv;
828
struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
829
struct QIB_7322_RcvCtrl rcvctrl;
830
struct QIB_7322_RcvCtrl_P rcvctrlp;
831
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
832
unsigned int ctx = qib7322_ctx ( ibdev, qp );
834
/* Disable context in hardware */
835
qib7322_readq_port ( qib7322, &rcvctrlp,
836
QIB_7322_RcvCtrl_0_offset, port );
837
BIT_SET ( &rcvctrlp, ContextEnable[ctx], 0 );
838
qib7322_writeq_port ( qib7322, &rcvctrlp,
839
QIB_7322_RcvCtrl_0_offset, port );
840
qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
841
BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
842
qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
844
/* Make sure the hardware has seen that the context is disabled */
845
qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
848
/* Free headers ring */
849
free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
853
* Initialise receive datapath
855
* @v qib7322 QIB7322 device
856
* @ret rc Return status code
858
static int qib7322_init_recv ( struct qib7322 *qib7322 ) {
859
struct QIB_7322_RcvCtrl rcvctrl;
860
struct QIB_7322_RcvCtrl_0 rcvctrlp;
861
struct QIB_7322_RcvQPMapTableA_0 rcvqpmaptablea0;
862
struct QIB_7322_RcvQPMapTableB_0 rcvqpmaptableb0;
863
struct QIB_7322_RcvQPMapTableA_1 rcvqpmaptablea1;
864
struct QIB_7322_RcvQPMapTableB_1 rcvqpmaptableb1;
865
struct QIB_7322_RcvQPMulticastContext_0 rcvqpmcastctx0;
866
struct QIB_7322_RcvQPMulticastContext_1 rcvqpmcastctx1;
867
struct QIB_7322_scalar rcvegrbase;
868
struct QIB_7322_scalar rcvhdrentsize;
869
struct QIB_7322_scalar rcvhdrcnt;
870
struct QIB_7322_RcvBTHQP_0 rcvbthqp;
871
struct QIB_7322_RxCreditVL0_0 rxcreditvl;
872
unsigned int contextcfg;
873
unsigned long egrbase;
874
unsigned int eager_array_size_kernel;
875
unsigned int eager_array_size_user;
878
/* Select configuration based on number of contexts */
879
switch ( QIB7322_NUM_CONTEXTS ) {
881
contextcfg = QIB7322_CONTEXTCFG_6CTX;
882
eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_6CTX_KERNEL;
883
eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_6CTX_USER;
886
contextcfg = QIB7322_CONTEXTCFG_10CTX;
887
eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_10CTX_KERNEL;
888
eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_10CTX_USER;
891
contextcfg = QIB7322_CONTEXTCFG_18CTX;
892
eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_18CTX_KERNEL;
893
eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_18CTX_USER;
896
linker_assert ( 0, invalid_QIB7322_NUM_CONTEXTS );
900
/* Configure number of contexts */
901
memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
902
BIT_FILL_2 ( &rcvctrl,
904
ContextCfg, contextcfg );
905
qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
907
/* Map QPNs to contexts */
908
memset ( &rcvctrlp, 0, sizeof ( rcvctrlp ) );
909
BIT_FILL_3 ( &rcvctrlp,
912
RcvPartitionKeyDisable, 1 );
913
qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_0_offset );
914
qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_1_offset );
915
memset ( &rcvqpmaptablea0, 0, sizeof ( rcvqpmaptablea0 ) );
916
BIT_FILL_6 ( &rcvqpmaptablea0,
922
RcvQPMapContext5, 10 );
923
qib7322_writeq ( qib7322, &rcvqpmaptablea0,
924
QIB_7322_RcvQPMapTableA_0_offset );
925
memset ( &rcvqpmaptableb0, 0, sizeof ( rcvqpmaptableb0 ) );
926
BIT_FILL_3 ( &rcvqpmaptableb0,
927
RcvQPMapContext6, 12,
928
RcvQPMapContext7, 14,
929
RcvQPMapContext8, 16 );
930
qib7322_writeq ( qib7322, &rcvqpmaptableb0,
931
QIB_7322_RcvQPMapTableB_0_offset );
932
memset ( &rcvqpmaptablea1, 0, sizeof ( rcvqpmaptablea1 ) );
933
BIT_FILL_6 ( &rcvqpmaptablea1,
939
RcvQPMapContext5, 11 );
940
qib7322_writeq ( qib7322, &rcvqpmaptablea1,
941
QIB_7322_RcvQPMapTableA_1_offset );
942
memset ( &rcvqpmaptableb1, 0, sizeof ( rcvqpmaptableb1 ) );
943
BIT_FILL_3 ( &rcvqpmaptableb1,
944
RcvQPMapContext6, 13,
945
RcvQPMapContext7, 15,
946
RcvQPMapContext8, 17 );
947
qib7322_writeq ( qib7322, &rcvqpmaptableb1,
948
QIB_7322_RcvQPMapTableB_1_offset );
950
/* Map multicast QPNs to contexts */
951
memset ( &rcvqpmcastctx0, 0, sizeof ( rcvqpmcastctx0 ) );
952
BIT_FILL_1 ( &rcvqpmcastctx0, RcvQpMcContext, 0 );
953
qib7322_writeq ( qib7322, &rcvqpmcastctx0,
954
QIB_7322_RcvQPMulticastContext_0_offset );
955
memset ( &rcvqpmcastctx1, 0, sizeof ( rcvqpmcastctx1 ) );
956
BIT_FILL_1 ( &rcvqpmcastctx1, RcvQpMcContext, 1 );
957
qib7322_writeq ( qib7322, &rcvqpmcastctx1,
958
QIB_7322_RcvQPMulticastContext_1_offset );
960
/* Configure receive header buffer sizes */
961
memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
962
BIT_FILL_1 ( &rcvhdrcnt, Value, QIB7322_RECV_HEADER_COUNT );
963
qib7322_writeq ( qib7322, &rcvhdrcnt, QIB_7322_RcvHdrCnt_offset );
964
memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
965
BIT_FILL_1 ( &rcvhdrentsize, Value, ( QIB7322_RECV_HEADER_SIZE >> 2 ) );
966
qib7322_writeq ( qib7322, &rcvhdrentsize,
967
QIB_7322_RcvHdrEntSize_offset );
969
/* Calculate eager array start addresses for each context */
970
qib7322_readq ( qib7322, &rcvegrbase, QIB_7322_RcvEgrBase_offset );
971
egrbase = BIT_GET ( &rcvegrbase, Value );
972
for ( ctx = 0 ; ctx < QIB7322_MAX_PORTS ; ctx++ ) {
973
qib7322->recv_wq[ctx].eager_array = egrbase;
974
qib7322->recv_wq[ctx].eager_entries = eager_array_size_kernel;
975
egrbase += ( eager_array_size_kernel *
976
sizeof ( struct QIB_7322_RcvEgr ) );
978
for ( ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
979
qib7322->recv_wq[ctx].eager_array = egrbase;
980
qib7322->recv_wq[ctx].eager_entries = eager_array_size_user;
981
egrbase += ( eager_array_size_user *
982
sizeof ( struct QIB_7322_RcvEgr ) );
984
for ( ctx = 0 ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
985
DBGC ( qib7322, "QIB7322 %p CTX %d eager array at %lx (%d "
986
"entries)\n", qib7322, ctx,
987
qib7322->recv_wq[ctx].eager_array,
988
qib7322->recv_wq[ctx].eager_entries );
991
/* Set the BTH QP for Infinipath packets to an unused value */
992
memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
993
BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, QIB7322_QP_IDETH );
994
qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_0_offset );
995
qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_1_offset );
997
/* Assign initial credits */
998
memset ( &rxcreditvl, 0, sizeof ( rxcreditvl ) );
999
BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL0 );
1000
qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1001
QIB_7322_RxCreditVL0_0_offset, 0 );
1002
qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1003
QIB_7322_RxCreditVL0_1_offset, 0 );
1004
BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL15 );
1005
qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1006
QIB_7322_RxCreditVL0_0_offset, 15 );
1007
qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1008
QIB_7322_RxCreditVL0_1_offset, 15 );
1014
* Shut down receive datapath
1016
* @v qib7322 QIB7322 device
1018
static void qib7322_fini_recv ( struct qib7322 *qib7322 __unused ) {
1019
/* Nothing to do; all contexts were already disabled when the
1020
* queue pairs were destroyed
1024
/***************************************************************************
1026
* Completion queue operations
1028
***************************************************************************
1032
* Create completion queue
1034
* @v ibdev Infiniband device
1035
* @v cq Completion queue
1036
* @ret rc Return status code
1038
static int qib7322_create_cq ( struct ib_device *ibdev,
1039
struct ib_completion_queue *cq ) {
1040
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1043
/* The hardware has no concept of completion queues. We
1044
* simply use the association between CQs and WQs (already
1045
* handled by the IB core) to decide which WQs to poll.
1047
* We do set a CQN, just to avoid confusing debug messages
1051
DBGC ( qib7322, "QIB7322 %p CQN %ld created\n", qib7322, cq->cqn );
1057
* Destroy completion queue
1059
* @v ibdev Infiniband device
1060
* @v cq Completion queue
1062
static void qib7322_destroy_cq ( struct ib_device *ibdev,
1063
struct ib_completion_queue *cq ) {
1064
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1067
DBGC ( qib7322, "QIB7322 %p CQN %ld destroyed\n", qib7322, cq->cqn );
1070
/***************************************************************************
1072
* Queue pair operations
1074
***************************************************************************
1080
* @v ibdev Infiniband device
1082
* @ret rc Return status code
1084
static int qib7322_create_qp ( struct ib_device *ibdev,
1085
struct ib_queue_pair *qp ) {
1086
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1090
/* Allocate a context and QPN */
1091
if ( ( rc = qib7322_alloc_ctx ( ibdev, qp ) ) != 0 )
1093
ctx = qib7322_ctx ( ibdev, qp );
1095
/* Set work-queue private data pointers */
1096
ib_wq_set_drvdata ( &qp->send, &qib7322->send_wq[ctx] );
1097
ib_wq_set_drvdata ( &qp->recv, &qib7322->recv_wq[ctx] );
1099
/* Create receive work queue */
1100
if ( ( rc = qib7322_create_recv_wq ( ibdev, qp ) ) != 0 )
1101
goto err_create_recv_wq;
1103
/* Create send work queue */
1104
if ( ( rc = qib7322_create_send_wq ( ibdev, qp ) ) != 0 )
1105
goto err_create_send_wq;
1109
qib7322_destroy_send_wq ( ibdev, qp );
1111
qib7322_destroy_recv_wq ( ibdev, qp );
1113
qib7322_free_ctx ( ibdev, qp );
1121
* @v ibdev Infiniband device
1123
* @ret rc Return status code
1125
static int qib7322_modify_qp ( struct ib_device *ibdev,
1126
struct ib_queue_pair *qp ) {
1127
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1129
/* Nothing to do; the hardware doesn't have a notion of queue
1132
DBGC2 ( qib7322, "QIB7322 %p QPN %ld modified\n", qib7322, qp->qpn );
1137
/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_qp ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp ) {

	/* Tear down in reverse order of creation */
	qib7322_destroy_send_wq ( ibdev, qp );
	qib7322_destroy_recv_wq ( ibdev, qp );
	qib7322_free_ctx ( ibdev, qp );
}
1150
/***************************************************************************
1152
* Work request operations
1154
***************************************************************************
1158
* Post send work queue entry
1160
* @v ibdev Infiniband device
1162
* @v dest Destination address vector
1163
* @v iobuf I/O buffer
1164
* @ret rc Return status code
1166
static int qib7322_post_send ( struct ib_device *ibdev,
1167
struct ib_queue_pair *qp,
1168
struct ib_address_vector *dest,
1169
struct io_buffer *iobuf ) {
1170
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1171
struct ib_work_queue *wq = &qp->send;
1172
struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1173
struct QIB_7322_SendPbc sendpbc;
1174
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1175
uint8_t header_buf[IB_MAX_HEADER_SIZE];
1176
struct io_buffer headers;
1178
unsigned long start_offset;
1179
unsigned long offset;
1184
/* Allocate send buffer and calculate offset */
1185
send_buf = qib7322_alloc_send_buf ( qib7322, qib7322_wq->send_bufs );
1188
start_offset = offset =
1189
qib7322_send_buffer_offset ( qib7322, qib7322_wq->send_bufs,
1192
/* Store I/O buffer and send buffer index */
1193
assert ( wq->iobufs[qib7322_wq->prod] == NULL );
1194
wq->iobufs[qib7322_wq->prod] = iobuf;
1195
qib7322_wq->used[qib7322_wq->prod] = send_buf;
1197
/* Construct headers */
1198
iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
1199
iob_reserve ( &headers, sizeof ( header_buf ) );
1200
ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1202
/* Calculate packet length */
1203
len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
1204
iob_len ( iobuf ) + 3 ) & ~3 );
1206
/* Construct send per-buffer control word */
1207
memset ( &sendpbc, 0, sizeof ( sendpbc ) );
1208
BIT_FILL_3 ( &sendpbc,
1209
LengthP1_toibc, ( ( len >> 2 ) - 1 ),
1211
VL15, ( ( qp->type == IB_QPT_SMI ) ? 1 : 0 ) );
1214
DBG_DISABLE ( DBGLVL_IO );
1215
qib7322_writeq ( qib7322, &sendpbc, offset );
1216
offset += sizeof ( sendpbc );
1219
for ( data = headers.data, frag_len = iob_len ( &headers ) ;
1220
frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
1221
qib7322_writel ( qib7322, *data, offset );
1225
for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
1226
frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
1227
qib7322_writel ( qib7322, *data, offset );
1229
DBG_ENABLE ( DBGLVL_IO );
1231
assert ( ( start_offset + len ) == offset );
1232
DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) posted [%lx,%lx)\n",
1233
qib7322, qp->qpn, send_buf, qib7322_wq->prod,
1234
start_offset, offset );
1236
/* Increment producer counter */
1237
qib7322_wq->prod = ( ( qib7322_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );
1243
* Complete send work queue entry
1245
* @v ibdev Infiniband device
1247
* @v wqe_idx Work queue entry index
1249
static void qib7322_complete_send ( struct ib_device *ibdev,
1250
struct ib_queue_pair *qp,
1251
unsigned int wqe_idx ) {
1252
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1253
struct ib_work_queue *wq = &qp->send;
1254
struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1255
struct io_buffer *iobuf;
1256
unsigned int send_buf;
1258
/* Parse completion */
1259
send_buf = qib7322_wq->used[wqe_idx];
1260
DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) complete\n",
1261
qib7322, qp->qpn, send_buf, wqe_idx );
1263
/* Complete work queue entry */
1264
iobuf = wq->iobufs[wqe_idx];
1265
assert ( iobuf != NULL );
1266
ib_complete_send ( ibdev, qp, iobuf, 0 );
1267
wq->iobufs[wqe_idx] = NULL;
1269
/* Free send buffer */
1270
qib7322_free_send_buf ( qib7322, qib7322_wq->send_bufs, send_buf );
1274
* Poll send work queue
1276
* @v ibdev Infiniband device
1279
static void qib7322_poll_send_wq ( struct ib_device *ibdev,
1280
struct ib_queue_pair *qp ) {
1281
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1282
struct ib_work_queue *wq = &qp->send;
1283
struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1284
unsigned int send_buf;
1286
/* Look for completions */
1287
while ( wq->fill ) {
1289
/* Check to see if send buffer has completed */
1290
send_buf = qib7322_wq->used[qib7322_wq->cons];
1291
if ( qib7322_send_buf_in_use ( qib7322, send_buf ) )
1294
/* Complete this buffer */
1295
qib7322_complete_send ( ibdev, qp, qib7322_wq->cons );
1297
/* Increment consumer counter */
1298
qib7322_wq->cons = ( ( qib7322_wq->cons + 1 ) &
1299
( wq->num_wqes - 1 ) );
1304
* Post receive work queue entry
1306
* @v ibdev Infiniband device
1308
* @v iobuf I/O buffer
1309
* @ret rc Return status code
1311
static int qib7322_post_recv ( struct ib_device *ibdev,
1312
struct ib_queue_pair *qp,
1313
struct io_buffer *iobuf ) {
1314
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1315
struct ib_work_queue *wq = &qp->recv;
1316
struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1317
struct QIB_7322_RcvEgr rcvegr;
1318
struct QIB_7322_scalar rcvegrindexhead;
1319
unsigned int ctx = qib7322_ctx ( ibdev, qp );
1322
unsigned int wqe_idx;
1323
unsigned int bufsize;
1326
addr = virt_to_bus ( iobuf->data );
1327
len = iob_tailroom ( iobuf );
1328
if ( addr & ( QIB7322_EAGER_BUFFER_ALIGN - 1 ) ) {
1329
DBGC ( qib7322, "QIB7322 %p QPN %ld misaligned RX buffer "
1330
"(%08lx)\n", qib7322, qp->qpn, addr );
1333
if ( len != QIB7322_RECV_PAYLOAD_SIZE ) {
1334
DBGC ( qib7322, "QIB7322 %p QPN %ld wrong RX buffer size "
1335
"(%zd)\n", qib7322, qp->qpn, len );
1339
/* Calculate eager producer index and WQE index */
1340
wqe_idx = ( qib7322_wq->eager_prod & ( wq->num_wqes - 1 ) );
1341
assert ( wq->iobufs[wqe_idx] == NULL );
1343
/* Store I/O buffer */
1344
wq->iobufs[wqe_idx] = iobuf;
1346
/* Calculate buffer size */
1347
switch ( QIB7322_RECV_PAYLOAD_SIZE ) {
1348
case 2048: bufsize = QIB7322_EAGER_BUFFER_2K; break;
1349
case 4096: bufsize = QIB7322_EAGER_BUFFER_4K; break;
1350
case 8192: bufsize = QIB7322_EAGER_BUFFER_8K; break;
1351
case 16384: bufsize = QIB7322_EAGER_BUFFER_16K; break;
1352
case 32768: bufsize = QIB7322_EAGER_BUFFER_32K; break;
1353
case 65536: bufsize = QIB7322_EAGER_BUFFER_64K; break;
1354
default: linker_assert ( 0, invalid_rx_payload_size );
1355
bufsize = QIB7322_EAGER_BUFFER_NONE;
1358
/* Post eager buffer */
1359
memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1360
BIT_FILL_2 ( &rcvegr,
1361
Addr, ( addr >> 11 ),
1363
qib7322_writeq_array8b ( qib7322, &rcvegr, qib7322_wq->eager_array,
1364
qib7322_wq->eager_prod );
1365
DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x(%04x) posted "
1366
"[%lx,%lx)\n", qib7322, qp->qpn, qib7322_wq->eager_prod,
1367
wqe_idx, addr, ( addr + len ) );
1369
/* Increment producer index */
1370
qib7322_wq->eager_prod = ( ( qib7322_wq->eager_prod + 1 ) &
1371
( qib7322_wq->eager_entries - 1 ) );
1373
/* Update head index */
1374
memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
1375
BIT_FILL_1 ( &rcvegrindexhead,
1376
Value, ( ( qib7322_wq->eager_prod + 1 ) &
1377
( qib7322_wq->eager_entries - 1 ) ) );
1378
qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
1379
QIB_7322_RcvEgrIndexHead0_offset, ctx );
1385
* Complete receive work queue entry
1387
* @v ibdev Infiniband device
1389
* @v header_offs Header offset
1391
static void qib7322_complete_recv ( struct ib_device *ibdev,
1392
struct ib_queue_pair *qp,
1393
unsigned int header_offs ) {
1394
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1395
struct ib_work_queue *wq = &qp->recv;
1396
struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1397
struct QIB_7322_RcvHdrFlags *rcvhdrflags;
1398
struct QIB_7322_RcvEgr rcvegr;
1399
struct io_buffer headers;
1400
struct io_buffer *iobuf;
1401
struct ib_queue_pair *intended_qp;
1402
struct ib_address_vector dest;
1403
struct ib_address_vector source;
1404
unsigned int rcvtype;
1405
unsigned int pktlen;
1406
unsigned int egrindex;
1407
unsigned int useegrbfr;
1408
unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
1409
unsigned int lenerr, parityerr, vcrcerr, icrcerr;
1411
unsigned int hdrqoffset;
1412
unsigned int header_len;
1413
unsigned int padded_payload_len;
1414
unsigned int wqe_idx;
1419
/* RcvHdrFlags are at the end of the header entry */
1420
rcvhdrflags = ( qib7322_wq->header + header_offs +
1421
QIB7322_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
1422
rcvtype = BIT_GET ( rcvhdrflags, RcvType );
1423
pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
1424
egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
1425
useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
1426
hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
1427
iberr = BIT_GET ( rcvhdrflags, IBErr );
1428
mkerr = BIT_GET ( rcvhdrflags, MKErr );
1429
tiderr = BIT_GET ( rcvhdrflags, TIDErr );
1430
khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
1431
mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
1432
lenerr = BIT_GET ( rcvhdrflags, LenErr );
1433
parityerr = BIT_GET ( rcvhdrflags, ParityErr );
1434
vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
1435
icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
1436
header_len = ( QIB7322_RECV_HEADER_SIZE - hdrqoffset -
1437
sizeof ( *rcvhdrflags ) );
1438
padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
1439
err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
1440
lenerr | parityerr | vcrcerr | icrcerr );
1441
/* IB header is placed immediately before RcvHdrFlags */
1442
iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
1443
header_len, header_len );
1445
/* Dump diagnostic information */
1446
DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x%s hdr %d type %d len "
1447
"%d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", qib7322, qp->qpn,
1448
egrindex, ( useegrbfr ? "" : "(unused)" ),
1449
( header_offs / QIB7322_RECV_HEADER_SIZE ),
1450
rcvtype, pktlen, header_len, padded_payload_len,
1451
( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
1452
( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
1453
( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
1454
( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
1455
( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
1456
( err ? "]" : "" ) );
1457
DBGCP_HDA ( qib7322, hdrqoffset, headers.data,
1458
( header_len + sizeof ( *rcvhdrflags ) ) );
1460
/* Parse header to generate address vector */
1461
qp0 = ( qp->qpn == 0 );
1463
if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
1464
&payload_len, &dest, &source ) ) != 0 ) {
1465
DBGC ( qib7322, "QIB7322 %p could not parse headers: %s\n",
1466
qib7322, strerror ( rc ) );
1469
if ( ! intended_qp )
1472
/* Complete this buffer and any skipped buffers. Note that
1473
* when the hardware runs out of buffers, it will repeatedly
1474
* report the same buffer (the tail) as a TID error, and that
1475
* it also has a habit of sometimes skipping over several
1480
/* If we have caught up to the producer counter, stop.
1481
* This will happen when the hardware first runs out
1482
* of buffers and starts reporting TID errors against
1483
* the eager buffer it wants to use next.
1485
if ( qib7322_wq->eager_cons == qib7322_wq->eager_prod )
1488
/* If we have caught up to where we should be after
1489
* completing this egrindex, stop. We phrase the test
1490
* this way to avoid completing the entire ring when
1491
* we receive the same egrindex twice in a row.
1493
if ( ( qib7322_wq->eager_cons ==
1494
( ( egrindex + 1 ) & ( qib7322_wq->eager_entries - 1 ))))
1497
/* Identify work queue entry and corresponding I/O
1500
wqe_idx = ( qib7322_wq->eager_cons & ( wq->num_wqes - 1 ) );
1501
iobuf = wq->iobufs[wqe_idx];
1502
assert ( iobuf != NULL );
1503
wq->iobufs[wqe_idx] = NULL;
1505
/* Complete the eager buffer */
1506
if ( qib7322_wq->eager_cons == egrindex ) {
1507
/* Completing the eager buffer described in
1508
* this header entry.
1510
if ( payload_len <= iob_tailroom ( iobuf ) ) {
1511
iob_put ( iobuf, payload_len );
1513
-EIO : ( useegrbfr ? 0 : -ECANCELED ) );
1515
DBGC ( qib7322, "QIB7322 %p bad payload len "
1516
"%zd\n", qib7322, payload_len );
1519
/* Redirect to target QP if necessary */
1520
if ( qp != intended_qp ) {
1521
DBGC2 ( qib7322, "QIB7322 %p redirecting QPN "
1523
qib7322, qp->qpn, intended_qp->qpn );
1524
/* Compensate for incorrect fill levels */
1526
intended_qp->recv.fill++;
1528
ib_complete_recv ( ibdev, intended_qp, &dest, &source,
1531
/* Completing on a skipped-over eager buffer */
1532
ib_complete_recv ( ibdev, qp, &dest, &source, iobuf,
1536
/* Clear eager buffer */
1537
memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1538
qib7322_writeq_array8b ( qib7322, &rcvegr,
1539
qib7322_wq->eager_array,
1540
qib7322_wq->eager_cons );
1542
/* Increment consumer index */
1543
qib7322_wq->eager_cons = ( ( qib7322_wq->eager_cons + 1 ) &
1544
( qib7322_wq->eager_entries - 1 ) );
1549
* Poll receive work queue
1551
* @v ibdev Infiniband device
1554
static void qib7322_poll_recv_wq ( struct ib_device *ibdev,
1555
struct ib_queue_pair *qp ) {
1556
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1557
struct ib_work_queue *wq = &qp->recv;
1558
struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1559
struct QIB_7322_RcvHdrHead0 rcvhdrhead;
1560
unsigned int ctx = qib7322_ctx ( ibdev, qp );
1561
unsigned int header_prod;
1563
/* Check for received packets */
1564
header_prod = ( BIT_GET ( &qib7322_wq->header_prod, Value ) << 2 );
1565
if ( header_prod == qib7322_wq->header_cons )
1568
/* Process all received packets */
1569
while ( qib7322_wq->header_cons != header_prod ) {
1571
/* Complete the receive */
1572
qib7322_complete_recv ( ibdev, qp, qib7322_wq->header_cons );
1574
/* Increment the consumer offset */
1575
qib7322_wq->header_cons += QIB7322_RECV_HEADER_SIZE;
1576
qib7322_wq->header_cons %= QIB7322_RECV_HEADERS_SIZE;
1578
/* QIB7322 has only one send buffer per port for VL15,
1579
* which almost always leads to send buffer exhaustion
1580
* and dropped MADs. Mitigate this by refusing to
1581
* process more than one VL15 MAD per poll, which will
1582
* enforce interleaved TX/RX polls.
1584
if ( qp->type == IB_QPT_SMI )
1588
/* Update consumer offset */
1589
memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
1590
BIT_FILL_2 ( &rcvhdrhead,
1591
RcvHeadPointer, ( qib7322_wq->header_cons >> 2 ),
1593
qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
1594
QIB_7322_RcvHdrHead0_offset, ctx );
1598
* Poll completion queue
1600
* @v ibdev Infiniband device
1601
* @v cq Completion queue
1603
static void qib7322_poll_cq ( struct ib_device *ibdev,
1604
struct ib_completion_queue *cq ) {
1605
struct ib_work_queue *wq;
1607
/* Poll associated send and receive queues */
1608
list_for_each_entry ( wq, &cq->work_queues, list ) {
1609
if ( wq->is_send ) {
1610
qib7322_poll_send_wq ( ibdev, wq->qp );
1612
qib7322_poll_recv_wq ( ibdev, wq->qp );
1617
/***************************************************************************
1621
***************************************************************************
1627
* @v ibdev Infiniband device
1629
static void qib7322_poll_eq ( struct ib_device *ibdev ) {
1630
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1631
struct QIB_7322_ErrStatus_0 errstatus;
1632
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1634
/* Check for and clear status bits */
1635
DBG_DISABLE ( DBGLVL_IO );
1636
qib7322_readq_port ( qib7322, &errstatus,
1637
QIB_7322_ErrStatus_0_offset, port );
1638
if ( errstatus.u.qwords[0] ) {
1639
DBGC ( qib7322, "QIB7322 %p port %d status %08x%08x\n", qib7322,
1640
port, errstatus.u.dwords[1], errstatus.u.dwords[0] );
1641
qib7322_writeq_port ( qib7322, &errstatus,
1642
QIB_7322_ErrClear_0_offset, port );
1644
DBG_ENABLE ( DBGLVL_IO );
1646
/* Check for link status changes */
1647
if ( BIT_GET ( &errstatus, IBStatusChanged ) )
1648
qib7322_link_state_changed ( ibdev );
1651
/***************************************************************************
1653
* Infiniband link-layer operations
1655
***************************************************************************
1659
* Determine supported link speeds
1661
* @v qib7322 QIB7322 device
1662
* @ret supported Supported link speeds
1664
static unsigned int qib7322_link_speed_supported ( struct qib7322 *qib7322,
1665
unsigned int port ) {
1666
struct QIB_7322_feature_mask features;
1667
struct QIB_7322_Revision revision;
1668
unsigned int supported;
1669
unsigned int boardid;
1671
/* Read the active feature mask */
1672
qib7322_readq ( qib7322, &features,
1673
QIB_7322_active_feature_mask_offset );
1676
supported = BIT_GET ( &features, Port0_Link_Speed_Supported );
1679
supported = BIT_GET ( &features, Port1_Link_Speed_Supported );
1682
DBGC ( qib7322, "QIB7322 %p port %d is invalid\n",
1688
/* Apply hacks for specific board IDs */
1689
qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
1690
boardid = BIT_GET ( &revision, BoardID );
1691
switch ( boardid ) {
1692
case QIB7322_BOARD_QMH7342 :
1693
DBGC2 ( qib7322, "QIB7322 %p is a QMH7342; forcing QDR-only\n",
1695
supported = IB_LINK_SPEED_QDR;
1702
DBGC2 ( qib7322, "QIB7322 %p port %d %s%s%s%s\n", qib7322, port,
1703
( supported ? "supports" : "disabled" ),
1704
( ( supported & IB_LINK_SPEED_SDR ) ? " SDR" : "" ),
1705
( ( supported & IB_LINK_SPEED_DDR ) ? " DDR" : "" ),
1706
( ( supported & IB_LINK_SPEED_QDR ) ? " QDR" : "" ) );
1711
* Initialise Infiniband link
1713
* @v ibdev Infiniband device
1714
* @ret rc Return status code
1716
static int qib7322_open ( struct ib_device *ibdev ) {
1717
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1718
struct QIB_7322_IBCCtrlA_0 ibcctrla;
1719
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1722
qib7322_readq_port ( qib7322, &ibcctrla,
1723
QIB_7322_IBCCtrlA_0_offset, port );
1724
BIT_SET ( &ibcctrla, IBLinkEn, 1 );
1725
qib7322_writeq_port ( qib7322, &ibcctrla,
1726
QIB_7322_IBCCtrlA_0_offset, port );
1732
* Close Infiniband link
1734
* @v ibdev Infiniband device
1736
static void qib7322_close ( struct ib_device *ibdev ) {
1737
struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1738
struct QIB_7322_IBCCtrlA_0 ibcctrla;
1739
unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1742
qib7322_readq_port ( qib7322, &ibcctrla,
1743
QIB_7322_IBCCtrlA_0_offset, port );
1744
BIT_SET ( &ibcctrla, IBLinkEn, 0 );
1745
qib7322_writeq_port ( qib7322, &ibcctrla,
1746
QIB_7322_IBCCtrlA_0_offset, port );
1749
/***************************************************************************
1751
* Multicast group operations
1753
***************************************************************************
1757
/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int qib7322_mcast_attach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp,
				  union ib_gid *gid ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	/* NOTE(review): body beyond the drvdata fetch was lost in
	 * extraction; no multicast filtering appears to be programmed
	 * into the hardware here — confirm against upstream driver.
	 */
	( void ) qib7322;
	( void ) qp;
	( void ) gid;
	return 0;
}
1776
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void qib7322_mcast_detach ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp,
				   union ib_gid *gid ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	/* NOTE(review): body beyond the drvdata fetch was lost in
	 * extraction; appears to be a no-op — confirm against
	 * upstream driver.
	 */
	( void ) qib7322;
	( void ) qp;
	( void ) gid;
}
1792
/** QIB7322 Infiniband operations */
1793
static struct ib_device_operations qib7322_ib_operations = {
1794
.create_cq = qib7322_create_cq,
1795
.destroy_cq = qib7322_destroy_cq,
1796
.create_qp = qib7322_create_qp,
1797
.modify_qp = qib7322_modify_qp,
1798
.destroy_qp = qib7322_destroy_qp,
1799
.post_send = qib7322_post_send,
1800
.post_recv = qib7322_post_recv,
1801
.poll_cq = qib7322_poll_cq,
1802
.poll_eq = qib7322_poll_eq,
1803
.open = qib7322_open,
1804
.close = qib7322_close,
1805
.mcast_attach = qib7322_mcast_attach,
1806
.mcast_detach = qib7322_mcast_detach,
1807
.set_port_info = qib7322_set_port_info,
1808
.set_pkey_table = qib7322_set_pkey_table,
1811
/***************************************************************************
1813
* I2C bus operations
1815
***************************************************************************
1818
/** QIB7322 I2C bit to GPIO mappings */
1819
static unsigned int qib7322_i2c_bits[] = {
1820
[I2C_BIT_SCL] = ( 1 << QIB7322_GPIO_SCL ),
1821
[I2C_BIT_SDA] = ( 1 << QIB7322_GPIO_SDA ),
1825
* Read QIB7322 I2C line status
1827
* @v basher Bit-bashing interface
1828
* @v bit_id Bit number
1829
* @ret zero Input is a logic 0
1830
* @ret non-zero Input is a logic 1
1832
static int qib7322_i2c_read_bit ( struct bit_basher *basher,
1833
unsigned int bit_id ) {
1834
struct qib7322 *qib7322 =
1835
container_of ( basher, struct qib7322, i2c.basher );
1836
struct QIB_7322_EXTStatus extstatus;
1837
unsigned int status;
1839
DBG_DISABLE ( DBGLVL_IO );
1841
qib7322_readq ( qib7322, &extstatus, QIB_7322_EXTStatus_offset );
1842
status = ( BIT_GET ( &extstatus, GPIOIn ) & qib7322_i2c_bits[bit_id] );
1844
DBG_ENABLE ( DBGLVL_IO );
1850
* Write QIB7322 I2C line status
1852
* @v basher Bit-bashing interface
1853
* @v bit_id Bit number
1854
* @v data Value to write
1856
static void qib7322_i2c_write_bit ( struct bit_basher *basher,
1857
unsigned int bit_id, unsigned long data ) {
1858
struct qib7322 *qib7322 =
1859
container_of ( basher, struct qib7322, i2c.basher );
1860
struct QIB_7322_EXTCtrl extctrl;
1861
struct QIB_7322_GPIO gpioout;
1862
unsigned int bit = qib7322_i2c_bits[bit_id];
1863
unsigned int outputs = 0;
1864
unsigned int output_enables = 0;
1866
DBG_DISABLE ( DBGLVL_IO );
1868
/* Read current GPIO mask and outputs */
1869
qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
1870
qib7322_readq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );
1872
/* Update outputs and output enables. I2C lines are tied
1873
* high, so we always set the output to 0 and use the output
1874
* enable to control the line.
1876
output_enables = BIT_GET ( &extctrl, GPIOOe );
1877
output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
1878
outputs = BIT_GET ( &gpioout, GPIO );
1879
outputs = ( outputs & ~bit );
1880
BIT_SET ( &extctrl, GPIOOe, output_enables );
1881
BIT_SET ( &gpioout, GPIO, outputs );
1883
/* Write the output enable first; that way we avoid logic
1886
qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
1887
qib7322_writeq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );
1890
DBG_ENABLE ( DBGLVL_IO );
1893
/** QIB7322 I2C bit-bashing interface operations */
1894
static struct bit_basher_operations qib7322_i2c_basher_ops = {
1895
.read = qib7322_i2c_read_bit,
1896
.write = qib7322_i2c_write_bit,
1900
* Initialise QIB7322 I2C subsystem
1902
* @v qib7322 QIB7322 device
1903
* @ret rc Return status code
1905
static int qib7322_init_i2c ( struct qib7322 *qib7322 ) {
1906
static int try_eeprom_address[] = { 0x51, 0x50 };
1910
/* Initialise bus */
1911
if ( ( rc = init_i2c_bit_basher ( &qib7322->i2c,
1912
&qib7322_i2c_basher_ops ) ) != 0 ) {
1913
DBGC ( qib7322, "QIB7322 %p could not initialise I2C bus: %s\n",
1914
qib7322, strerror ( rc ) );
1918
/* Probe for devices */
1919
for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
1920
sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
1921
init_i2c_eeprom ( &qib7322->eeprom, try_eeprom_address[i] );
1922
if ( ( rc = i2c_check_presence ( &qib7322->i2c.i2c,
1923
&qib7322->eeprom ) ) == 0 ) {
1924
DBGC2 ( qib7322, "QIB7322 %p found EEPROM at %02x\n",
1925
qib7322, try_eeprom_address[i] );
1930
DBGC ( qib7322, "QIB7322 %p could not find EEPROM\n", qib7322 );
1935
* Read EEPROM parameters
1937
* @v qib7322 QIB7322 device
1938
* @ret rc Return status code
1940
static int qib7322_read_eeprom ( struct qib7322 *qib7322 ) {
1941
struct i2c_interface *i2c = &qib7322->i2c.i2c;
1942
union ib_guid *guid = &qib7322->guid;
1946
if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
1947
QIB7322_EEPROM_GUID_OFFSET, guid->bytes,
1948
sizeof ( *guid ) ) ) != 0 ) {
1949
DBGC ( qib7322, "QIB7322 %p could not read GUID: %s\n",
1950
qib7322, strerror ( rc ) );
1953
DBGC2 ( qib7322, "QIB7322 %p has GUID " IB_GUID_FMT "\n",
1954
qib7322, IB_GUID_ARGS ( guid ) );
1956
/* Read serial number (debug only) */
1958
uint8_t serial[QIB7322_EEPROM_SERIAL_SIZE + 1];
1960
serial[ sizeof ( serial ) - 1 ] = '\0';
1961
if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
1962
QIB7322_EEPROM_SERIAL_OFFSET, serial,
1963
( sizeof ( serial ) - 1 ) ) ) != 0 ) {
1964
DBGC ( qib7322, "QIB7322 %p could not read serial: "
1965
"%s\n", qib7322, strerror ( rc ) );
1968
DBGC2 ( qib7322, "QIB7322 %p has serial number \"%s\"\n",
1975
/***************************************************************************
1977
* Advanced High-performance Bus (AHB) access
1979
***************************************************************************
1983
* Wait for AHB transaction to complete
1985
* @v qib7322 QIB7322 device
1986
* @ret rc Return status code
1988
static int qib7322_ahb_wait ( struct qib7322 *qib7322 ) {
1989
struct QIB_7322_ahb_transaction_reg transaction;
1992
/* Wait for Ready bit to be asserted */
1993
for ( i = 0 ; i < QIB7322_AHB_MAX_WAIT_US ; i++ ) {
1994
qib7322_readq ( qib7322, &transaction,
1995
QIB_7322_ahb_transaction_reg_offset );
1996
if ( BIT_GET ( &transaction, ahb_rdy ) )
2001
DBGC ( qib7322, "QIB7322 %p timed out waiting for AHB transaction\n",
2007
* Request ownership of the AHB
2009
* @v qib7322 QIB7322 device
2010
* @v location AHB location
2011
* @ret rc Return status code
2013
static int qib7322_ahb_request ( struct qib7322 *qib7322,
2014
unsigned int location ) {
2015
struct QIB_7322_ahb_access_ctrl access;
2018
/* Request ownership */
2019
memset ( &access, 0, sizeof ( access ) );
2020
BIT_FILL_2 ( &access,
2022
sw_sel_ahb_trgt, QIB7322_AHB_LOC_TARGET ( location ) );
2023
qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );
2025
/* Wait for ownership to be granted */
2026
if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 ) {
2027
DBGC ( qib7322, "QIB7322 %p could not obtain AHB ownership: "
2028
"%s\n", qib7322, strerror ( rc ) );
2036
* Release ownership of the AHB
2038
* @v qib7322 QIB7322 device
2040
static void qib7322_ahb_release ( struct qib7322 *qib7322 ) {
2041
struct QIB_7322_ahb_access_ctrl access;
2043
memset ( &access, 0, sizeof ( access ) );
2044
qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );
2050
* @v qib7322 QIB7322 device
2051
* @v location AHB location
2052
* @v data Data to read
2053
* @ret rc Return status code
2055
* You must have already acquired ownership of the AHB.
2057
static int qib7322_ahb_read ( struct qib7322 *qib7322, unsigned int location,
2059
struct QIB_7322_ahb_transaction_reg xact;
2062
/* Avoid returning uninitialised data on error */
2065
/* Initiate transaction */
2066
memset ( &xact, 0, sizeof ( xact ) );
2068
ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
2069
write_not_read, 0 );
2070
qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2072
/* Wait for transaction to complete */
2073
if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
2076
/* Read transaction data */
2077
qib7322_readq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2078
*data = BIT_GET ( &xact, ahb_data );
2083
* Write data via AHB
2085
* @v qib7322 QIB7322 device
2086
* @v location AHB location
2087
* @v data Data to write
2088
* @ret rc Return status code
2090
* You must have already acquired ownership of the AHB.
2092
static int qib7322_ahb_write ( struct qib7322 *qib7322, unsigned int location,
2094
struct QIB_7322_ahb_transaction_reg xact;
2097
/* Initiate transaction */
2098
memset ( &xact, 0, sizeof ( xact ) );
2100
ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
2103
qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2105
/* Wait for transaction to complete */
2106
if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
2113
* Read/modify/write AHB register
2115
* @v qib7322 QIB7322 device
2116
* @v location AHB location
2117
* @v value Value to set
2118
* @v mask Mask to apply to old value
2119
* @ret rc Return status code
2121
static int qib7322_ahb_mod_reg ( struct qib7322 *qib7322, unsigned int location,
2122
uint32_t value, uint32_t mask ) {
2127
DBG_DISABLE ( DBGLVL_IO );
2130
assert ( ( value & mask ) == value );
2132
/* Acquire bus ownership */
2133
if ( ( rc = qib7322_ahb_request ( qib7322, location ) ) != 0 )
2136
/* Read existing value */
2137
if ( ( rc = qib7322_ahb_read ( qib7322, location, &old_value ) ) != 0 )
2141
new_value = ( ( old_value & ~mask ) | value );
2142
DBGCP ( qib7322, "QIB7322 %p AHB %x %#08x => %#08x\n",
2143
qib7322, location, old_value, new_value );
2144
if ( ( rc = qib7322_ahb_write ( qib7322, location, new_value ) ) != 0 )
2149
qib7322_ahb_release ( qib7322 );
2151
DBG_ENABLE ( DBGLVL_IO );
2156
* Read/modify/write AHB register across all ports and channels
2158
* @v qib7322 QIB7322 device
2159
* @v reg AHB register
2160
* @v value Value to set
2161
* @v mask Mask to apply to old value
2162
* @ret rc Return status code
2164
static int qib7322_ahb_mod_reg_all ( struct qib7322 *qib7322, unsigned int reg,
2165
uint32_t value, uint32_t mask ) {
2167
unsigned int channel;
2168
unsigned int location;
2171
for ( port = 0 ; port < QIB7322_MAX_PORTS ; port++ ) {
2172
for ( channel = 0 ; channel < QIB7322_MAX_WIDTH ; channel++ ) {
2173
location = QIB7322_AHB_LOCATION ( port, channel, reg );
2174
if ( ( rc = qib7322_ahb_mod_reg ( qib7322, location,
2175
value, mask ) ) != 0 )
2182
/***************************************************************************
2184
* Infiniband SerDes initialisation
2186
***************************************************************************
2190
* Initialise the IB SerDes
2192
* @v qib7322 QIB7322 device
2193
* @ret rc Return status code
2195
static int qib7322_init_ib_serdes ( struct qib7322 *qib7322 ) {
2196
struct QIB_7322_IBCCtrlA_0 ibcctrla;
2197
struct QIB_7322_IBCCtrlB_0 ibcctrlb;
2198
struct QIB_7322_IBPCSConfig_0 ibpcsconfig;
2200
/* Configure sensible defaults for IBC */
2201
memset ( &ibcctrla, 0, sizeof ( ibcctrla ) );
2202
BIT_FILL_5 ( &ibcctrla, /* Tuning values taken from Linux driver */
2203
FlowCtrlPeriod, 0x03,
2204
FlowCtrlWaterMark, 0x05,
2205
MaxPktLen, ( ( QIB7322_RECV_HEADER_SIZE +
2206
QIB7322_RECV_PAYLOAD_SIZE +
2207
4 /* ICRC */ ) >> 2 ),
2208
PhyerrThreshold, 0xf,
2209
OverrunThreshold, 0xf );
2210
qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_0_offset );
2211
qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_1_offset );
2213
/* Force SDR only to avoid needing all the DDR tuning,
2214
* Mellanox compatibility hacks etc. SDR is plenty for
2215
* boot-time operation.
2217
qib7322_readq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
2218
BIT_SET ( &ibcctrlb, IB_ENHANCED_MODE, 0 );
2219
BIT_SET ( &ibcctrlb, SD_SPEED_SDR, 1 );
2220
BIT_SET ( &ibcctrlb, SD_SPEED_DDR, 0 );
2221
BIT_SET ( &ibcctrlb, SD_SPEED_QDR, 0 );
2222
BIT_SET ( &ibcctrlb, IB_NUM_CHANNELS, 1 ); /* 4X only */
2223
BIT_SET ( &ibcctrlb, IB_LANE_REV_SUPPORTED, 0 );
2224
BIT_SET ( &ibcctrlb, HRTBT_ENB, 0 );
2225
BIT_SET ( &ibcctrlb, HRTBT_AUTO, 0 );
2226
qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
2227
qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_1_offset );
2230
qib7322_ahb_mod_reg_all ( qib7322, 2, 0, 0x00000e00UL );
2232
/* Bring XGXS out of reset */
2233
memset ( &ibpcsconfig, 0, sizeof ( ibpcsconfig ) );
2234
qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_0_offset );
2235
qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_1_offset );
/***************************************************************************
 *
 * PCI layer interface
 *
 ***************************************************************************
 */
2250
* @v qib7322 QIB7322 device
2252
* @ret rc Return status code
2254
static void qib7322_reset ( struct qib7322 *qib7322, struct pci_device *pci ) {
2255
struct QIB_7322_Control control;
2256
struct pci_config_backup backup;
2258
/* Back up PCI configuration space */
2259
pci_backup ( pci, &backup, NULL );
2262
memset ( &control, 0, sizeof ( control ) );
2263
BIT_FILL_1 ( &control, SyncReset, 1 );
2264
qib7322_writeq ( qib7322, &control, QIB_7322_Control_offset );
2266
/* Wait for reset to complete */
2269
/* Restore PCI configuration space */
2270
pci_restore ( pci, &backup, NULL );
2278
* @ret rc Return status code
2280
static int qib7322_probe ( struct pci_device *pci ) {
2281
struct qib7322 *qib7322;
2282
struct QIB_7322_Revision revision;
2283
struct ib_device *ibdev;
2284
unsigned int link_speed_supported;
2288
/* Allocate QIB7322 device */
2289
qib7322 = zalloc ( sizeof ( *qib7322 ) );
2292
goto err_alloc_qib7322;
2294
pci_set_drvdata ( pci, qib7322 );
2296
/* Fix up PCI device */
2297
adjust_pci_device ( pci );
2300
qib7322->regs = ioremap ( pci->membase, QIB7322_BAR0_SIZE );
2301
DBGC2 ( qib7322, "QIB7322 %p has BAR at %08lx\n",
2302
qib7322, pci->membase );
2305
qib7322_reset ( qib7322, pci );
2307
/* Print some general data */
2308
qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
2309
DBGC2 ( qib7322, "QIB7322 %p board %02lx v%ld.%ld.%ld.%ld\n", qib7322,
2310
BIT_GET ( &revision, BoardID ),
2311
BIT_GET ( &revision, R_SW ),
2312
BIT_GET ( &revision, R_Arch ),
2313
BIT_GET ( &revision, R_ChipRevMajor ),
2314
BIT_GET ( &revision, R_ChipRevMinor ) );
2316
/* Initialise I2C subsystem */
2317
if ( ( rc = qib7322_init_i2c ( qib7322 ) ) != 0 )
2320
/* Read EEPROM parameters */
2321
if ( ( rc = qib7322_read_eeprom ( qib7322 ) ) != 0 )
2322
goto err_read_eeprom;
2324
/* Initialise send datapath */
2325
if ( ( rc = qib7322_init_send ( qib7322 ) ) != 0 )
2328
/* Initialise receive datapath */
2329
if ( ( rc = qib7322_init_recv ( qib7322 ) ) != 0 )
2332
/* Initialise the IB SerDes */
2333
if ( ( rc = qib7322_init_ib_serdes ( qib7322 ) ) != 0 )
2334
goto err_init_ib_serdes;
2336
/* Allocate Infiniband devices */
2337
for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
2338
link_speed_supported =
2339
qib7322_link_speed_supported ( qib7322, i );
2340
if ( ! link_speed_supported )
2342
ibdev = alloc_ibdev ( 0 );
2345
goto err_alloc_ibdev;
2347
qib7322->ibdev[i] = ibdev;
2348
ibdev->dev = &pci->dev;
2349
ibdev->op = &qib7322_ib_operations;
2350
ibdev->port = ( QIB7322_PORT_BASE + i );
2351
ibdev->link_width_enabled = ibdev->link_width_supported =
2352
IB_LINK_WIDTH_4X; /* 1x does not work */
2353
ibdev->link_speed_enabled = ibdev->link_speed_supported =
2354
IB_LINK_SPEED_SDR; /* to avoid need for link tuning */
2355
memcpy ( &ibdev->node_guid, &qib7322->guid,
2356
sizeof ( ibdev->node_guid ) );
2357
memcpy ( &ibdev->gid.s.guid, &qib7322->guid,
2358
sizeof ( ibdev->gid.s.guid ) );
2359
assert ( ( ibdev->gid.s.guid.bytes[7] & i ) == 0 );
2360
ibdev->gid.s.guid.bytes[7] |= i;
2361
ib_set_drvdata ( ibdev, qib7322 );
2364
/* Register Infiniband devices */
2365
for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
2366
if ( ! qib7322->ibdev[i] )
2368
if ( ( rc = register_ibdev ( qib7322->ibdev[i] ) ) != 0 ) {
2369
DBGC ( qib7322, "QIB7322 %p port %d could not register "
2370
"IB device: %s\n", qib7322, i, strerror ( rc ) );
2371
goto err_register_ibdev;
2377
i = QIB7322_MAX_PORTS;
2379
for ( i-- ; i >= 0 ; i-- ) {
2380
if ( qib7322->ibdev[i] )
2381
unregister_ibdev ( qib7322->ibdev[i] );
2383
i = QIB7322_MAX_PORTS;
2385
for ( i-- ; i >= 0 ; i-- )
2386
ibdev_put ( qib7322->ibdev[i] );
2388
qib7322_fini_send ( qib7322 );
2390
qib7322_fini_recv ( qib7322 );
2394
iounmap ( qib7322->regs );
2405
static void qib7322_remove ( struct pci_device *pci ) {
2406
struct qib7322 *qib7322 = pci_get_drvdata ( pci );
2409
for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- ) {
2410
if ( qib7322->ibdev[i] )
2411
unregister_ibdev ( qib7322->ibdev[i] );
2413
for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- )
2414
ibdev_put ( qib7322->ibdev[i] );
2415
qib7322_fini_send ( qib7322 );
2416
qib7322_fini_recv ( qib7322 );
2417
iounmap ( qib7322->regs );
2421
static struct pci_device_id qib7322_nics[] = {
2422
PCI_ROM ( 0x1077, 0x7322, "iba7322", "IBA7322 QDR InfiniBand HCA", 0 ),
2425
struct pci_driver qib7322_driver __pci_driver = {
2426
.ids = qib7322_nics,
2427
.id_count = ( sizeof ( qib7322_nics ) / sizeof ( qib7322_nics[0] ) ),
2428
.probe = qib7322_probe,
2429
.remove = qib7322_remove,