/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009, 2011
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */
#define KMSG_COMPONENT "cio"
9
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
#include <linux/types.h>
12
#include <linux/err.h>
13
#include <asm/ccwdev.h>
19
#include "cio_debug.h"
/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}
37
* Adjust path mask to use next path and reset retry count. Return resulting
40
static u16 ccwreq_next_path(struct ccw_device *cdev)
42
struct ccw_request *req = &cdev->private->req;
44
if (!req->singlepath) {
48
req->retries = req->maxretries;
49
req->mask = lpm_adjust(req->mask >>= 1, req->lpm);
55
* Clean up device state and report to callback.
57
static void ccwreq_stop(struct ccw_device *cdev, int rc)
59
struct ccw_request *req = &cdev->private->req;
64
ccw_device_set_timeout(cdev, 0);
65
memset(&cdev->private->irb, 0, sizeof(struct irb));
66
if (rc && rc != -ENODEV && req->drc)
68
req->callback(cdev, req->data, rc);
72
* (Re-)Start the operation until retries and paths are exhausted.
74
static void ccwreq_do(struct ccw_device *cdev)
76
struct ccw_request *req = &cdev->private->req;
77
struct subchannel *sch = to_subchannel(cdev->dev.parent);
78
struct ccw1 *cp = req->cp;
82
if (req->retries-- == 0) {
83
/* Retries exhausted, try next path. */
84
ccwreq_next_path(cdev);
87
/* Perform start function. */
88
memset(&cdev->private->irb, 0, sizeof(struct irb));
89
rc = cio_start(sch, cp, (u8) req->mask);
91
/* I/O started successfully. */
92
ccw_device_set_timeout(cdev, req->timeout);
96
/* Permanent device error. */
100
/* Permant path error. */
101
ccwreq_next_path(cdev);
104
/* Temporary improper status. */
110
ccwreq_stop(cdev, rc);
114
* ccw_request_start - perform I/O request
117
* Perform the I/O request specified by cdev->req.
119
void ccw_request_start(struct ccw_device *cdev)
121
struct ccw_request *req = &cdev->private->req;
123
if (req->singlepath) {
124
/* Try all paths twice to counter link flapping. */
127
req->mask = req->lpm;
129
req->retries = req->maxretries;
130
req->mask = lpm_adjust(req->mask, req->lpm);
140
ccwreq_stop(cdev, -EACCES);
144
* ccw_request_cancel - cancel running I/O request
147
* Cancel the I/O request specified by cdev->req. Return non-zero if request
148
* has already finished, zero otherwise.
150
int ccw_request_cancel(struct ccw_device *cdev)
152
struct subchannel *sch = to_subchannel(cdev->dev.parent);
153
struct ccw_request *req = &cdev->private->req;
161
ccwreq_stop(cdev, rc);
166
* Return the status of the internal I/O started on the specified ccw device.
167
* Perform BASIC SENSE if required.
169
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
171
struct irb *irb = &cdev->private->irb;
172
struct cmd_scsw *scsw = &irb->scsw.cmd;
175
/* Perform BASIC SENSE if needed. */
176
if (ccw_device_accumulate_and_sense(cdev, lcirb))
178
/* Check for halt/clear interrupt. */
179
if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
181
/* Check for path error. */
182
if (scsw->cc == 3 || scsw->pno)
183
return IO_PATH_ERROR;
184
/* Handle BASIC SENSE data. */
185
if (irb->esw.esw0.erw.cons) {
186
CIO_TRACE_EVENT(2, "sensedata");
187
CIO_HEX_EVENT(2, &cdev->private->dev_id,
188
sizeof(struct ccw_dev_id));
189
CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
190
/* Check for command reject. */
191
if (irb->ecw[0] & SNS0_CMD_REJECT)
193
/* Ask the driver what to do */
194
if (cdev->drv && cdev->drv->uc_handler) {
195
todo = cdev->drv->uc_handler(cdev, lcirb);
196
CIO_TRACE_EVENT(2, "uc_response");
197
CIO_HEX_EVENT(2, &todo, sizeof(todo));
200
return IO_STATUS_ERROR;
201
case UC_TODO_RETRY_ON_NEW_PATH:
202
return IO_PATH_ERROR;
206
return IO_STATUS_ERROR;
209
/* Assume that unexpected SENSE data implies an error. */
210
return IO_STATUS_ERROR;
212
/* Check for channel errors. */
213
if (scsw->cstat != 0)
214
return IO_STATUS_ERROR;
215
/* Check for device errors. */
216
if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
217
return IO_STATUS_ERROR;
218
/* Check for final state. */
219
if (!(scsw->dstat & DEV_STAT_DEV_END))
221
/* Check for other improper status. */
222
if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
223
return IO_STATUS_ERROR;
228
* Log ccw request status.
230
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
232
struct ccw_request *req = &cdev->private->req;
234
struct ccw_dev_id dev_id;
238
} __attribute__ ((packed)) data;
239
data.dev_id = cdev->private->dev_id;
240
data.retries = req->retries;
241
data.lpm = (u8) req->mask;
242
data.status = (u8) status;
243
CIO_TRACE_EVENT(2, "reqstat");
244
CIO_HEX_EVENT(2, &data, sizeof(data));
248
* ccw_request_handler - interrupt handler for I/O request procedure.
251
* Handle interrupt during I/O request procedure.
253
void ccw_request_handler(struct ccw_device *cdev)
255
struct irb *irb = (struct irb *)&S390_lowcore.irb;
256
struct ccw_request *req = &cdev->private->req;
257
enum io_status status;
258
int rc = -EOPNOTSUPP;
260
/* Check status of I/O request. */
261
status = ccwreq_status(cdev, irb);
263
status = req->filter(cdev, req->data, irb, status);
264
if (status != IO_RUNNING)
265
ccw_device_set_timeout(cdev, 0);
266
if (status != IO_DONE && status != IO_RUNNING)
267
ccwreq_log_status(cdev, status);
277
case IO_STATUS_ERROR:
280
/* Check if request was cancelled on purpose. */
287
/* Check back with request initiator. */
290
switch (req->check(cdev, req->data)) {
301
ccwreq_stop(cdev, 0);
305
/* Try next path and restart I/O. */
306
if (!ccwreq_next_path(cdev)) {
315
ccwreq_stop(cdev, rc);
320
* ccw_request_timeout - timeout handler for I/O request procedure
323
* Handle timeout during I/O request procedure.
325
void ccw_request_timeout(struct ccw_device *cdev)
327
struct subchannel *sch = to_subchannel(cdev->dev.parent);
328
struct ccw_request *req = &cdev->private->req;
329
int rc = -ENODEV, chp;
331
if (cio_update_schib(sch))
334
for (chp = 0; chp < 8; chp++) {
335
if ((0x80 >> chp) & sch->schib.pmcw.lpum)
336
pr_warning("%s: No interrupt was received within %lus "
337
"(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
338
dev_name(&cdev->dev), req->timeout / HZ,
339
scsw_cstat(&sch->schib.scsw),
340
scsw_dstat(&sch->schib.scsw),
342
sch->schib.pmcw.chpid[chp]);
345
if (!ccwreq_next_path(cdev)) {
346
/* set the final return code for this request */
355
ccwreq_stop(cdev, rc);
359
* ccw_request_notoper - notoper handler for I/O request procedure
362
* Handle notoper during I/O request procedure.
364
void ccw_request_notoper(struct ccw_device *cdev)
366
ccwreq_stop(cdev, -ENODEV);