/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
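
/*
 * Free lists are maintained per possible CPU, indexed by
 * xid % num_possible_cpus(); the extra list at index num_possible_cpus()
 * (RESERVE_FREE_LIST_INDEX) is reserved for slow-path requests (ELS, TM,
 * ABTS, cleanup) -- compare bnx2fc_cmd_alloc() with bnx2fc_elstm_alloc().
 */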

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_hba *hba = io_req->port->priv;

	if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
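
/*
 * The reference taken above is the "timer hold": it is dropped either on
 * the done: path of bnx2fc_cmd_timeout() below, or by whichever path
 * successfully cancel_delayed_work()s the pending timeout.
 */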

static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo "
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	/* err_code lands in the host byte of the SCSI result */
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}

struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
					    u16 min_xid, u16 max_xid)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
	}
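
	/*
	 * Illustrative example (values assumed, not taken from this file):
	 * with 8 possible CPUs and BNX2FC_MIN_XID = 0, xid 17 lands on
	 * free_list[17 % 8] = free_list[1], while the last BNX2FC_ELSTM_XIDS
	 * xids all land on the reserved slow-path list, free_list[8].
	 */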

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct list_head *list;
		struct list_head *tmp;

		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);

free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
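
/*
 * Note: both allocators return an io_req whose refcount is initialized
 * to 1 by kref_init(); the owner must eventually drop that reference
 * with kref_put(&io_req->refcount, bnx2fc_cmd_release).
 */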
484
static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
486
struct fcoe_port *port = tgt->port;
487
struct bnx2fc_hba *hba = port->priv;
488
struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
489
struct bnx2fc_cmd *io_req;
490
struct list_head *listp;
491
struct io_bdt *bd_tbl;
494
int index = get_cpu();
496
max_sqes = BNX2FC_SCSI_MAX_SQES;
498
* NOTE: Free list insertions and deletions are protected with
501
spin_lock_bh(&cmd_mgr->free_list_lock[index]);
502
if ((list_empty(&cmd_mgr->free_list[index])) ||
503
(tgt->num_active_ios.counter >= max_sqes)) {
504
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
509
listp = (struct list_head *)
510
cmd_mgr->free_list[index].next;
511
list_del_init(listp);
512
io_req = (struct bnx2fc_cmd *) listp;
514
cmd_mgr->cmds[xid] = io_req;
515
atomic_inc(&tgt->num_active_ios);
516
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
519
INIT_LIST_HEAD(&io_req->link);
522
io_req->cmd_mgr = cmd_mgr;
523
io_req->req_flags = 0;
525
/* Bind io_bdt for this io_req */
526
/* Have a static link between io_req and io_bdt_pool */
527
bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
528
bd_tbl->io_req = io_req;
530
/* Hold the io_req against deletion */
531
kref_init(&io_req->refcount);

void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
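
/*
 * An io_req is returned to the same free list it was carved from: SCSI
 * commands go back to the per-CPU list selected by xid, everything else
 * to the reserved slow-path list, mirroring the two allocators above.
 */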

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_hba *hba = io_req->port->priv;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_hba *hba = io_req->port->priv;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_hba *hba;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	hba = port->priv;

	if (rport == NULL) {
		printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		printk(KERN_ERR PFX "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		printk(KERN_ERR PFX "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_hba *hba;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	hba = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
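
/*
 * The ABTS response is delivered through bnx2fc_process_abts_compl(),
 * which decides (based on BA_ACC vs. BA_RJT) whether an RRQ must be sent
 * before the aborted exchange id can be reused.
 */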

int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_hba *hba;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	hba = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
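
/*
 * Illustrative sketch (lives elsewhere in the driver; names assumed):
 * these error handlers are wired into the driver's scsi_host_template,
 * roughly as follows:
 *
 *	static struct scsi_host_template bnx2fc_shost_template = {
 *		...
 *		.eh_abort_handler	 = bnx2fc_eh_abort,
 *		.eh_device_reset_handler = bnx2fc_eh_device_reset,
 *		.eh_target_reset_handler = bnx2fc_eh_target_reset,
 *		...
 *	};
 */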

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ALERT PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		/* Cancel the current timer running on this io_req */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
		rc = bnx2fc_initiate_abts(io_req);
	} else {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	if (rc == FAILED) {
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return rc;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = FAILED;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&cmd->timeout_work))
					kref_put(&cmd->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT every IO
	 * on the target that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&cmd->timeout_work))
				kref_put(&cmd->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
		goto done;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
done:
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
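
/*
 * Worked example (BNX2FC_BD_SPLIT_SZ assumed to be 32KB): a 72KB DMA
 * segment is emitted as three BDs of 32KB, 32KB and 8KB, so the function
 * returns sg_frags = 3 and the caller advances bd_count by 3.
 */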

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = scsi_dma_map(sc);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}

static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc))
		bd_count = bnx2fc_map_sg(io_req);
	else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun,
		       (struct scsi_lun *) fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}

static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq > 1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO.
		 * 256 bytes of single rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ALERT PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
			sc_cmd->result = DID_NO_CONNECT << 16;
			sc_cmd->scsi_done(sc_cmd);
			return 0;
		}
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}
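
/*
 * Return-value contract, as used above: 0 means the command was accepted
 * (or completed immediately via scsi_done), while SCSI_MLQUEUE_HOST_BUSY /
 * SCSI_MLQUEUE_TARGET_BUSY ask SCSI-ml to requeue and retry the command.
 */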

void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->cmn.general.rsp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}
	io_req->sc_cmd = NULL;

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct fc_lport *lport = port->lport;
	struct fcoe_dev_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	bnx2fc_build_bd_list_from_sg(io_req);

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */
	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}