/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
qla4xxx_read_flash(struct bsg_job *bsg_job)
15
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
16
struct scsi_qla_host *ha = to_qla_host(host);
17
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
18
struct iscsi_bsg_request *bsg_req = bsg_job->request;
22
uint8_t *flash = NULL;
25
bsg_reply->reply_payload_rcv_len = 0;
27
if (unlikely(pci_channel_offline(ha->pdev)))
30
if (ql4xxx_reset_active(ha)) {
31
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
36
if (ha->flash_state != QLFLASH_WAITING) {
37
ql4_printk(KERN_ERR, ha, "%s: another flash operation "
38
"active\n", __func__);
43
ha->flash_state = QLFLASH_READING;
44
offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
45
length = bsg_job->reply_payload.payload_len;
47
flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
50
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
56
rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
58
ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
59
bsg_reply->result = DID_ERROR << 16;
62
bsg_reply->reply_payload_rcv_len =
63
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
64
bsg_job->reply_payload.sg_cnt,
66
bsg_reply->result = DID_OK << 16;
69
bsg_job_done(bsg_job, bsg_reply->result,
70
bsg_reply->reply_payload_rcv_len);
71
dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
73
ha->flash_state = QLFLASH_WAITING;
78
qla4xxx_update_flash(struct bsg_job *bsg_job)
80
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
81
struct scsi_qla_host *ha = to_qla_host(host);
82
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
83
struct iscsi_bsg_request *bsg_req = bsg_job->request;
88
uint8_t *flash = NULL;
91
bsg_reply->reply_payload_rcv_len = 0;
93
if (unlikely(pci_channel_offline(ha->pdev)))
96
if (ql4xxx_reset_active(ha)) {
97
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
102
if (ha->flash_state != QLFLASH_WAITING) {
103
ql4_printk(KERN_ERR, ha, "%s: another flash operation "
104
"active\n", __func__);
109
ha->flash_state = QLFLASH_WRITING;
110
length = bsg_job->request_payload.payload_len;
111
offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
112
options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
114
flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
117
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
123
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
124
bsg_job->request_payload.sg_cnt, flash, length);
126
rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
128
ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
129
bsg_reply->result = DID_ERROR << 16;
132
bsg_reply->result = DID_OK << 16;
134
bsg_job_done(bsg_job, bsg_reply->result,
135
bsg_reply->reply_payload_rcv_len);
136
dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
138
ha->flash_state = QLFLASH_WAITING;
143
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
145
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
146
struct scsi_qla_host *ha = to_qla_host(host);
147
struct iscsi_bsg_request *bsg_req = bsg_job->request;
148
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
149
uint32_t status[MBOX_REG_COUNT];
154
bsg_reply->reply_payload_rcv_len = 0;
156
if (unlikely(pci_channel_offline(ha->pdev)))
159
/* Only 4022 and above adapters are supported */
163
if (ql4xxx_reset_active(ha)) {
164
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
169
if (bsg_job->reply_payload.payload_len < sizeof(status)) {
170
ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
171
__func__, bsg_job->reply_payload.payload_len);
176
acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
177
ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
179
rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
181
ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
183
bsg_reply->result = DID_ERROR << 16;
186
bsg_reply->reply_payload_rcv_len =
187
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
188
bsg_job->reply_payload.sg_cnt,
189
status, sizeof(status));
190
bsg_reply->result = DID_OK << 16;
193
bsg_job_done(bsg_job, bsg_reply->result,
194
bsg_reply->reply_payload_rcv_len);
200
qla4xxx_read_nvram(struct bsg_job *bsg_job)
202
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
203
struct scsi_qla_host *ha = to_qla_host(host);
204
struct iscsi_bsg_request *bsg_req = bsg_job->request;
205
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
208
uint32_t total_len = 0;
209
dma_addr_t nvram_dma;
210
uint8_t *nvram = NULL;
213
bsg_reply->reply_payload_rcv_len = 0;
215
if (unlikely(pci_channel_offline(ha->pdev)))
218
/* Only 40xx adapters are supported */
219
if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
222
if (ql4xxx_reset_active(ha)) {
223
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
228
offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
229
len = bsg_job->reply_payload.payload_len;
230
total_len = offset + len;
232
/* total len should not be greater than max NVRAM size */
233
if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
234
((is_qla4022(ha) || is_qla4032(ha)) &&
235
total_len > QL40X2_NVRAM_SIZE)) {
236
ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
237
" nvram size, offset=%d len=%d\n",
238
__func__, offset, len);
242
nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
245
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
251
rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
253
ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
254
bsg_reply->result = DID_ERROR << 16;
257
bsg_reply->reply_payload_rcv_len =
258
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
259
bsg_job->reply_payload.sg_cnt,
261
bsg_reply->result = DID_OK << 16;
264
bsg_job_done(bsg_job, bsg_reply->result,
265
bsg_reply->reply_payload_rcv_len);
266
dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
272
qla4xxx_update_nvram(struct bsg_job *bsg_job)
274
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
275
struct scsi_qla_host *ha = to_qla_host(host);
276
struct iscsi_bsg_request *bsg_req = bsg_job->request;
277
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
280
uint32_t total_len = 0;
281
dma_addr_t nvram_dma;
282
uint8_t *nvram = NULL;
285
bsg_reply->reply_payload_rcv_len = 0;
287
if (unlikely(pci_channel_offline(ha->pdev)))
290
if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
293
if (ql4xxx_reset_active(ha)) {
294
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
299
offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
300
len = bsg_job->request_payload.payload_len;
301
total_len = offset + len;
303
/* total len should not be greater than max NVRAM size */
304
if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
305
((is_qla4022(ha) || is_qla4032(ha)) &&
306
total_len > QL40X2_NVRAM_SIZE)) {
307
ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
308
" nvram size, offset=%d len=%d\n",
309
__func__, offset, len);
313
nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
316
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
322
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
323
bsg_job->request_payload.sg_cnt, nvram, len);
325
rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
327
ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
328
bsg_reply->result = DID_ERROR << 16;
331
bsg_reply->result = DID_OK << 16;
333
bsg_job_done(bsg_job, bsg_reply->result,
334
bsg_reply->reply_payload_rcv_len);
335
dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
341
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
343
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
344
struct scsi_qla_host *ha = to_qla_host(host);
345
struct iscsi_bsg_request *bsg_req = bsg_job->request;
346
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
352
bsg_reply->reply_payload_rcv_len = 0;
354
if (unlikely(pci_channel_offline(ha->pdev)))
360
if (ql4xxx_reset_active(ha)) {
361
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
366
region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
367
field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
368
field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
370
rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
372
ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
373
bsg_reply->result = DID_ERROR << 16;
376
bsg_reply->result = DID_OK << 16;
378
bsg_job_done(bsg_job, bsg_reply->result,
379
bsg_reply->reply_payload_rcv_len);
385
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
387
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
388
struct scsi_qla_host *ha = to_qla_host(host);
389
struct iscsi_bsg_request *bsg_req = bsg_job->request;
390
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
391
uint32_t acb_type = 0;
397
bsg_reply->reply_payload_rcv_len = 0;
399
if (unlikely(pci_channel_offline(ha->pdev)))
402
/* Only 4022 and above adapters are supported */
406
if (ql4xxx_reset_active(ha)) {
407
ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
412
acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
413
len = bsg_job->reply_payload.payload_len;
414
if (len < sizeof(struct addr_ctrl_blk)) {
415
ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
421
acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
423
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
429
rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
431
ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
432
bsg_reply->result = DID_ERROR << 16;
435
bsg_reply->reply_payload_rcv_len =
436
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
437
bsg_job->reply_payload.sg_cnt,
439
bsg_reply->result = DID_OK << 16;
442
bsg_job_done(bsg_job, bsg_reply->result,
443
bsg_reply->reply_payload_rcv_len);
444
dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
450
* qla4xxx_process_vendor_specific - handle vendor specific bsg request
451
* @job: iscsi_bsg_job to handle
453
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
455
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
456
struct iscsi_bsg_request *bsg_req = bsg_job->request;
457
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
458
struct scsi_qla_host *ha = to_qla_host(host);
460
switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
461
case QLISCSI_VND_READ_FLASH:
462
return qla4xxx_read_flash(bsg_job);
464
case QLISCSI_VND_UPDATE_FLASH:
465
return qla4xxx_update_flash(bsg_job);
467
case QLISCSI_VND_GET_ACB_STATE:
468
return qla4xxx_get_acb_state(bsg_job);
470
case QLISCSI_VND_READ_NVRAM:
471
return qla4xxx_read_nvram(bsg_job);
473
case QLISCSI_VND_UPDATE_NVRAM:
474
return qla4xxx_update_nvram(bsg_job);
476
case QLISCSI_VND_RESTORE_DEFAULTS:
477
return qla4xxx_restore_defaults(bsg_job);
479
case QLISCSI_VND_GET_ACB:
480
return qla4xxx_bsg_get_acb(bsg_job);
483
ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
484
"0x%x\n", __func__, bsg_req->msgcode);
485
bsg_reply->result = (DID_ERROR << 16);
486
bsg_reply->reply_payload_rcv_len = 0;
487
bsg_job_done(bsg_job, bsg_reply->result,
488
bsg_reply->reply_payload_rcv_len);
494
* qla4xxx_bsg_request - handle bsg request from ISCSI transport
495
* @job: iscsi_bsg_job to handle
497
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
499
struct iscsi_bsg_request *bsg_req = bsg_job->request;
500
struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
501
struct scsi_qla_host *ha = to_qla_host(host);
503
switch (bsg_req->msgcode) {
504
case ISCSI_BSG_HST_VENDOR:
505
return qla4xxx_process_vendor_specific(bsg_job);
508
ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
509
__func__, bsg_req->msgcode);