/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	device->rx_cq = ib_create_cq(device->ib_device,
				     iser_cq_callback,
				     iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	device->tx_cq = ib_create_cq(device->ib_device,
				     NULL, iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_TX_CQ_LEN, 0);
	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

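/*
 * Each iser_device owns a single RX/TX CQ pair that is shared by all
 * connections on that IB device: only the RX CQ is armed for interrupts,
 * and completions are processed in iser_cq_tasklet_fn() below rather than
 * in hard-IRQ context.
 */
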
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	BUG_ON(device->mr == NULL);

	tasklet_kill(&device->cq_tasklet);
	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_destroy_cq(device->tx_cq);
	(void)ib_destroy_cq(device->rx_cq);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->tx_cq = NULL;
	device->rx_cq = NULL;
	device->pd = NULL;
}

/**
 * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			req_err, resp_err, ret = -ENOMEM;
	struct ib_fmr_pool_param params;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		goto out_err;

	ib_conn->login_req_buf  = ib_conn->login_buf;
	ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;

	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_resp_buf,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			ib_conn->login_req_dma = 0;
		if (resp_err)
			ib_conn->login_resp_dma = 0;
		goto out_err;
	}

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec)
		goto out_err;

	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = ISCSI_DEF_XMIT_CMDS_MAX * 2;
	params.dirty_watermark	 = ISCSI_DEF_XMIT_CMDS_MAX;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(ib_conn->fmr_pool)) {
		ret = PTR_ERR(ib_conn->fmr_pool);
		ib_conn->fmr_pool = NULL;
		goto out_err;
	}

	memset(&init_attr, 0, sizeof init_attr);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq;
	init_attr.recv_cq	= device->rx_cq;
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

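/*
 * Sizing notes for the resources created above: the login buffer is one
 * allocation split into a request part (mapped TO_DEVICE) and a response
 * part (mapped FROM_DEVICE); the FMR pool holds twice the maximal number of
 * queued SCSI commands so half of it can be flushed at the dirty watermark
 * while the other half stays mapped; max_send_sge is 2, which is assumed to
 * cover the iSER headers SGE plus a single data SGE per send.
 */
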
/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* if called from the cma handler context, the caller arranges for
	 * the cma to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}

/**
 * based on the resolved device node GUID, see if there is a device already
 * allocated for this IB device. If there is no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

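/*
 * The reference taken in iser_device_find_by_ib_device() is dropped by
 * iser_device_try_release() below; the device itself is freed only when the
 * last connection using it goes away.
 */
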
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_err("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

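/*
 * All state transitions go through this compare-exchange under the
 * connection lock, so e.g. the UP -> TERMINATING move is performed exactly
 * once even when iser_conn_terminate() races with the error paths in
 * iser_handle_comp_error() and iser_disconnected_handler().
 */
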
/**
 * Frees all conn objects and deallocs conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
{
	struct iser_device  *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn, can_destroy_id);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	iscsi_destroy_endpoint(ib_conn->ep);
}

void iser_conn_get(struct iser_conn *ib_conn)
{
	atomic_inc(&ib_conn->refcount);
}

/* returns 1 if the connection was released, 0 otherwise */
int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
{
	if (atomic_dec_and_test(&ib_conn->refcount)) {
		iser_conn_release(ib_conn, can_destroy_id);
		return 1;
	}
	return 0;
}

/**
 * triggers the start of the disconnect procedures and waits for them
 * to complete
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);

	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}

static int iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
	return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}

static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		return iser_connect_error(cma_id);
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		return iser_connect_error(cma_id);
	}

	return 0;
}

static int iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return 0;
failure:
	return iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}

static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	int ret;

	ib_conn = (struct iser_conn *)cma_id->context;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}

	ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
	return ret;
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_err("event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ret = iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ret = iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		ret = iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ret = iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}

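/*
 * The CMA callback above drives connection establishment: ADDR_RESOLVED
 * binds the device and resolves the route, ROUTE_RESOLVED creates the QP and
 * FMR pool and calls rdma_connect(), ESTABLISHED moves the connection to
 * ISER_CONN_UP, and all error/disconnect events funnel into the teardown
 * handlers that wake the waiters in iser_connect() and iser_conn_terminate().
 */
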
void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int                 non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_err("connecting to: %pI4, port 0x%x\n",
		 &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	iser_conn_get(ib_conn); /* ref ib conn's cma id */
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)ib_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
connect_failure:
	iser_conn_release(ib_conn, 1);
	return err;
}

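/*
 * A hypothetical caller sketch (for illustration; the actual call site lives
 * in the iSCSI transport code, not in this file): the endpoint connect path
 * is assumed to run iser_conn_init() and then iser_connect() with
 * non_blocking set, polling the connection until its state leaves
 * ISER_CONN_PENDING.
 */
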
/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_fmr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

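/*
 * ib_fmr_pool_map_phys() maps whole pages, so the region above starts page
 * aligned; the byte offset of the first data byte is then added back to ->va
 * and ->len is narrowed from the page-granular size to the actual data size.
 */
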
/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}

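/*
 * The ring head arithmetic above masks with ISER_QP_MAX_RECV_DTOS - 1, which
 * assumes ISER_QP_MAX_RECV_DTOS is a power of two; chaining the work requests
 * lets the whole batch be handed to ib_post_recv() in a single call.
 */
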
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

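/*
 * Every send is posted with IB_SEND_SIGNALED, so each one produces a TX
 * completion; post_send_buf_count is decremented here only on a failed post,
 * and otherwise presumably by the send-completion path reached from
 * iser_drain_tx_cq() (iser_snd_completion() is defined outside this file).
 */
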
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.                                             */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
		    ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more non-completed posts to the QP, complete the
		 * termination process without worrying about the
		 * disconnect event */
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

static int iser_drain_tx_cq(struct iser_device *device)
{
	struct ib_cq  *cq = device->tx_cq;
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}

static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_device  *device = (struct iser_device *)data;
	struct ib_cq	    *cq = device->rx_cq;
	struct ib_wc	    wc;
	struct iser_rx_desc *desc;
	unsigned long	    xfer_len;
	struct iser_conn    *ib_conn;
	int completed_tx, completed_rx;

	completed_tx = completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device);
	}
	/* it is assumed here that arming the CQ only once it is empty *
	 * would not cause interrupts to be missed                     */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	completed_tx += iser_drain_tx_cq(device);
	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

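/*
 * Polling model: only the RX CQ is armed (re-armed above once it is empty,
 * with the stated assumption that this does not miss interrupts); the TX CQ
 * is never armed and is drained opportunistically, every 64 RX completions
 * inside the loop and once more at the end.
 */
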
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_device  *device = (struct iser_device *)cq_context;

	tasklet_schedule(&device->cq_tasklet);
}