/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>

#include "c2.h"
#include "c2_vq.h"

#define PBL_VIRT 1
#define PBL_PHYS 2
/*
 * Send all the PBL messages to convey the remainder of the PBL.
 * Wait for the adapter's reply on the last one.  The last message
 * is indicated by setting the MEM_PBL_COMPLETE bit in its flags.
 *
 * NOTE: vq_req is _not_ freed by this function.  The VQ Host
 *	 Reply buffer _is_ freed by this function.
 */
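/*
 * For example (hypothetical numbers): with room for 60 PBEs per VQ
 * message, a remainder of 150 PBEs goes out as three messages carrying
 * 60, 60 and 30 entries; only the third sets MEM_PBL_COMPLETE and
 * carries the vq_req context, so only it generates a reply.
 */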
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;		/* number of PBEs that fit in one PBL message */
	u32 count;		/* number of PBEs in this PBL message */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL work request */
	struct c2wr_nsmr_pbl_rep *reply;	/* adapter reply */
	int err, pbl_virt, pbl_index, i;

	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		return -EINVAL;
	}
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
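	/*
	 * Worked example (hypothetical sizes): with a 512-byte VQ message
	 * and a 32-byte c2wr_nsmr_pbl_req header, pbe_count is
	 * (512 - 32) / sizeof(u64) == 60 page addresses per message.
	 */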
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;
	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0, indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;
	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);
		/*
		 * If this is the last message, reference the vq request
		 * struct because we are going to wait for a reply, and
		 * mark this PBL message as the final one.
		 */
		if (count == pbl_depth) {
			/*
			 * Reference the request struct.  Dereferenced in
			 * the interrupt handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we
			 * can wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}
		/*
		 * If pbl_virt is set, then va is a virtual address that
		 * describes a virtually contiguous memory allocation.
		 * The wr needs the start of each virtual page to be
		 * converted to the corresponding physical address of the
		 * page.  If pbl_virt is not set, then va is an array of
		 * physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				va += PAGE_SIZE;
			} else {
				wr->paddrs[i] =
				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
			}
		}
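		/*
		 * Note: the PBL_VIRT branch above only advances va and
		 * never fills wr->paddrs; the in-tree callers pass only
		 * PBL_PHYS.  A full conversion would look roughly like
		 * this sketch (assuming va is a lowmem kernel virtual
		 * address; hypothetical, not the adapter's requirement):
		 *
		 *	wr->paddrs[i] = cpu_to_be64(virt_to_phys((void *)va));
		 *	va += PAGE_SIZE;
		 */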
		/*
		 * Send the WR to the adapter.
		 */
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			/* Drop the reference taken above for the last message. */
			if (count == pbl_depth)
				vq_req_put(c2dev, vq_req);
			goto bail0;
		}

		pbl_depth -= count;
		pbl_index += count;
	}
	/*
	 * Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process the reply.
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	kfree(wr);
	return err;
}
#define C2_PBL_MAX_DEPTH 131072
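/*
 * Worked out: 131072 PBEs at one 4 KiB page each cover
 * 131072 * 4096 bytes == 512 MiB per memory region.
 */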
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;
	if (!va || !length || !addr_list || !pbl_depth)
		return -EINVAL;

	/*
	 * Verify the PBL depth is within the rnic max.
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH)
		return -EINVAL;
	/*
	 * Allocate a verbs request object.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}
	/*
	 * Build the WR.
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);
	/*
	 * Compute how many PBEs can fit in this message.
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	if (pbl_depth <= pbe_count)
		flags |= MEM_PBL_COMPLETE;
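	/*
	 * For example (hypothetical count): with pbe_count == 60, a
	 * pbl_depth of 50 fits entirely in this WR and MEM_PBL_COMPLETE
	 * is set here; a pbl_depth of 100 sends 60 PBEs now and leaves
	 * 40 for send_pbl_messages() below.
	 */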
	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;	/* stag_key */
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);
	/*
	 * Fill out the PBL for this message.
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}
	/*
	 * Reference the request struct.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send the WR to the adapter.
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}
	/*
	 * Wait for the reply from the adapter.
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	/*
	 * Process the reply.
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -EIO;
		goto bail1;
	}
	err = c2_errno(reply);
	if (err)
		goto bail2;

	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);
	/*
	 * If there are still more PBEs, we need to send them to the
	 * adapter and wait for a reply on the final one.  Reuse vq_req
	 * for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {
		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err)
			goto bail1;
	}
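	/*
	 * Continuing the example above: with pbe_count == 60 and an
	 * original pbl_depth of 100, i == 60 after the fill loop, so the
	 * remaining 40 PBEs are sent starting at &addr_list[60].  The
	 * lkey is passed already byte-swapped because send_pbl_messages()
	 * writes the stag_index into the WR as-is.
	 */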
	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

bail2:
	vq_repbuf_free(c2dev, reply);
bail1:
	kfree(wr);
bail0:
	vq_req_free(c2dev, vq_req);

	return err;
}
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2_vq_req *vq_req;	/* verbs request object */
	struct c2wr_stag_dealloc_req wr;	/* work request */
	struct c2wr_stag_dealloc_rep *reply;	/* WR reply */
	int err;
	/*
	 * Allocate a verbs request object.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;
	/*
	 * Build the WR.
	 */
	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.stag_index = cpu_to_be32(stag_index);
	/*
	 * Reference the request struct.  Dereferenced in the interrupt
	 * handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send the WR to the adapter.
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}
	/*
	 * Wait for the reply from the adapter.
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process the reply.
	 */
	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
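/*
 * Usage sketch (hypothetical caller, illustration only): register a
 * two-page physically contiguous buffer and later release its STag.
 * The helper signatures are the ones above; dma_addr, acf and mr are
 * made-up placeholders supplied by the caller.
 *
 *	u64 pages[2] = { dma_addr, dma_addr + PAGE_SIZE };
 *	u64 va = 0;
 *
 *	err = c2_nsmr_register_phys_kern(c2dev, pages, PAGE_SIZE, 2,
 *					 2 * PAGE_SIZE, 0, &va, acf, mr);
 *	if (!err)
 *		err = c2_stag_dealloc(c2dev, mr->ibmr.lkey);
 */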