/*
 * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>
/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */
/* CM MAD class version and attribute IDs (one per CM message type). */
#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
enum cm_msg_sequence {
67
struct ib_mad_hdr hdr;
75
/* local QPN:24, responder resources:8 */
77
/* local EECN:24, initiator depth:8 */
80
* remote EECN:24, remote CM response timeout:5,
81
* transport service type:2, end-to-end flow control:1
84
/* starting PSN:24, local CM response timeout:5, retry count:3 */
87
/* path MTU:4, RDC exists:1, RNR retry count:3. */
89
/* max CM Retries:4, SRQ:1, extended transport type:3 */
92
__be16 primary_local_lid;
93
__be16 primary_remote_lid;
94
union ib_gid primary_local_gid;
95
union ib_gid primary_remote_gid;
96
/* flow label:20, rsvd:6, packet rate:6 */
97
__be32 primary_offset88;
98
u8 primary_traffic_class;
100
/* SL:4, subnet local:1, rsvd:3 */
102
/* local ACK timeout:5, rsvd:3 */
105
__be16 alt_local_lid;
106
__be16 alt_remote_lid;
107
union ib_gid alt_local_gid;
108
union ib_gid alt_remote_gid;
109
/* flow label:20, rsvd:6, packet rate:6 */
110
__be32 alt_offset132;
111
u8 alt_traffic_class;
113
/* SL:4, subnet local:1, rsvd:3 */
115
/* local ACK timeout:5, rsvd:3 */
118
u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
120
} __attribute__ ((packed));
122
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
124
return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
127
static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
129
req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
130
(be32_to_cpu(req_msg->offset32) &
134
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
136
return (u8) be32_to_cpu(req_msg->offset32);
139
static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
141
req_msg->offset32 = cpu_to_be32(resp_res |
142
(be32_to_cpu(req_msg->offset32) &
146
static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
148
return (u8) be32_to_cpu(req_msg->offset36);
151
static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
154
req_msg->offset36 = cpu_to_be32(init_depth |
155
(be32_to_cpu(req_msg->offset36) &
159
static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
161
return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
164
static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
167
req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
168
(be32_to_cpu(req_msg->offset40) &
172
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
174
u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
175
switch(transport_type) {
176
case 0: return IB_QPT_RC;
177
case 1: return IB_QPT_UC;
179
switch (req_msg->offset51 & 0x7) {
180
case 1: return IB_QPT_XRC_TGT;
187
static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
188
enum ib_qp_type qp_type)
192
req_msg->offset40 = cpu_to_be32((be32_to_cpu(
197
req_msg->offset40 = cpu_to_be32((be32_to_cpu(
200
req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
203
req_msg->offset40 = cpu_to_be32(be32_to_cpu(
209
static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
211
return be32_to_cpu(req_msg->offset40) & 0x1;
214
static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
217
req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
218
(be32_to_cpu(req_msg->offset40) &
222
static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
224
return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
227
static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
230
req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
231
(be32_to_cpu(req_msg->offset44) & 0x000000FF));
234
static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
236
return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
239
static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
242
req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
243
(be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
246
static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
248
return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
251
static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
254
req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
255
(be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
258
static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
260
return req_msg->offset50 >> 4;
263
static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
265
req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
268
static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
270
return req_msg->offset50 & 0x7;
273
static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
276
req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
277
(rnr_retry_count & 0x7));
280
static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
282
return req_msg->offset51 >> 4;
285
static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
288
req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
291
static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
293
return (req_msg->offset51 & 0x8) >> 3;
296
static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
298
req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
302
static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
304
return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
307
static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
310
req_msg->primary_offset88 = cpu_to_be32(
311
(be32_to_cpu(req_msg->primary_offset88) &
313
(be32_to_cpu(flow_label) << 12));
316
static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
318
return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
321
static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
324
req_msg->primary_offset88 = cpu_to_be32(
325
(be32_to_cpu(req_msg->primary_offset88) &
326
0xFFFFFFC0) | (rate & 0x3F));
329
static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
331
return (u8) (req_msg->primary_offset94 >> 4);
334
static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
336
req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
340
static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
342
return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
345
static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
348
req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
349
((subnet_local & 0x1) << 3));
352
static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
354
return (u8) (req_msg->primary_offset95 >> 3);
357
static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
358
u8 local_ack_timeout)
360
req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
361
(local_ack_timeout << 3));
364
static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
366
return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
369
static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
372
req_msg->alt_offset132 = cpu_to_be32(
373
(be32_to_cpu(req_msg->alt_offset132) &
375
(be32_to_cpu(flow_label) << 12));
378
static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
380
return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
383
static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
386
req_msg->alt_offset132 = cpu_to_be32(
387
(be32_to_cpu(req_msg->alt_offset132) &
388
0xFFFFFFC0) | (rate & 0x3F));
391
static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
393
return (u8) (req_msg->alt_offset138 >> 4);
396
static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
398
req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
402
static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
404
return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
407
static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
410
req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
411
((subnet_local & 0x1) << 3));
414
static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
416
return (u8) (req_msg->alt_offset139 >> 3);
419
static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
420
u8 local_ack_timeout)
422
req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
423
(local_ack_timeout << 3));
426
/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};
struct ib_mad_hdr hdr;
436
__be32 local_comm_id;
437
__be32 remote_comm_id;
438
/* message MRAed:2, rsvd:6 */
440
/* service timeout:5, rsvd:3 */
443
u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
445
} __attribute__ ((packed));
447
static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
449
return (u8) (mra_msg->offset8 >> 6);
452
static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
454
mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
457
static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
459
return (u8) (mra_msg->offset9 >> 3);
462
static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
465
mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
466
(service_timeout << 3));
470
struct ib_mad_hdr hdr;
472
__be32 local_comm_id;
473
__be32 remote_comm_id;
474
/* message REJected:2, rsvd:6 */
476
/* reject info length:7, rsvd:1. */
479
u8 ari[IB_CM_REJ_ARI_LENGTH];
481
u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
483
} __attribute__ ((packed));
485
static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
487
return (u8) (rej_msg->offset8 >> 6);
490
static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
492
rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
495
static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
497
return (u8) (rej_msg->offset9 >> 1);
500
static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
503
rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
507
struct ib_mad_hdr hdr;
509
__be32 local_comm_id;
510
__be32 remote_comm_id;
512
/* local QPN:24, rsvd:8 */
514
/* local EECN:24, rsvd:8 */
516
/* starting PSN:24 rsvd:8 */
520
/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
522
/* RNR retry count:3, SRQ:1, rsvd:5 */
524
__be64 local_ca_guid;
526
u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
528
} __attribute__ ((packed));
530
static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
532
return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
535
static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
537
rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
538
(be32_to_cpu(rep_msg->offset12) & 0x000000FF));
541
static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
543
return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
546
static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
548
rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
549
(be32_to_cpu(rep_msg->offset16) & 0x000000FF));
552
static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
554
return (qp_type == IB_QPT_XRC_INI) ?
555
cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
558
static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
560
return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
563
static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
566
rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
567
(be32_to_cpu(rep_msg->offset20) & 0x000000FF));
570
static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
572
return (u8) (rep_msg->offset26 >> 3);
575
static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
578
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
579
(target_ack_delay << 3));
582
static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
584
return (u8) ((rep_msg->offset26 & 0x06) >> 1);
587
static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
589
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
590
((failover & 0x3) << 1));
593
static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
595
return (u8) (rep_msg->offset26 & 0x01);
598
static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
601
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
605
static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
607
return (u8) (rep_msg->offset27 >> 5);
610
static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
613
rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
614
(rnr_retry_count << 5));
617
static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
619
return (u8) ((rep_msg->offset27 >> 4) & 0x1);
622
static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
624
rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
629
struct ib_mad_hdr hdr;
631
__be32 local_comm_id;
632
__be32 remote_comm_id;
634
u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
636
} __attribute__ ((packed));
639
struct ib_mad_hdr hdr;
641
__be32 local_comm_id;
642
__be32 remote_comm_id;
643
/* remote QPN/EECN:24, rsvd:8 */
646
u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
648
} __attribute__ ((packed));
650
static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
652
return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
655
static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
657
dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
658
(be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
662
struct ib_mad_hdr hdr;
664
__be32 local_comm_id;
665
__be32 remote_comm_id;
667
u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
669
} __attribute__ ((packed));
672
struct ib_mad_hdr hdr;
674
__be32 local_comm_id;
675
__be32 remote_comm_id;
678
/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
682
__be16 alt_local_lid;
683
__be16 alt_remote_lid;
684
union ib_gid alt_local_gid;
685
union ib_gid alt_remote_gid;
686
/* flow label:20, rsvd:4, traffic class:8 */
689
/* rsvd:2, packet rate:6 */
691
/* SL:4, subnet local:1, rsvd:3 */
693
/* local ACK timeout:5, rsvd:3 */
696
u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
697
} __attribute__ ((packed));
699
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
701
return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
704
static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
706
lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
707
(be32_to_cpu(lap_msg->offset12) &
711
static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
713
return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
716
static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
719
lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
720
(be32_to_cpu(lap_msg->offset12) &
724
static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
726
return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
729
static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
732
lap_msg->offset56 = cpu_to_be32(
733
(be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
734
(be32_to_cpu(flow_label) << 12));
737
static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
739
return (u8) be32_to_cpu(lap_msg->offset56);
742
static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
745
lap_msg->offset56 = cpu_to_be32(traffic_class |
746
(be32_to_cpu(lap_msg->offset56) &
750
static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
752
return lap_msg->offset61 & 0x3F;
755
static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
758
lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
761
static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
763
return lap_msg->offset62 >> 4;
766
static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
768
lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
771
static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
773
return (lap_msg->offset62 >> 3) & 0x1;
776
static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
779
lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
780
(lap_msg->offset61 & 0xF7);
782
static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
784
return lap_msg->offset63 >> 3;
787
static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
788
u8 local_ack_timeout)
790
lap_msg->offset63 = (local_ack_timeout << 3) |
791
(lap_msg->offset63 & 0x07);
795
struct ib_mad_hdr hdr;
797
__be32 local_comm_id;
798
__be32 remote_comm_id;
802
u8 info[IB_CM_APR_INFO_LENGTH];
804
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
805
} __attribute__ ((packed));
807
struct cm_sidr_req_msg {
808
struct ib_mad_hdr hdr;
815
u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
816
} __attribute__ ((packed));
818
struct cm_sidr_rep_msg {
819
struct ib_mad_hdr hdr;
829
u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
831
u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
832
} __attribute__ ((packed));
834
static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
836
return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
839
static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
842
sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
843
(be32_to_cpu(sidr_rep_msg->offset8) &
847
#endif /* CM_MSGS_H */