/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);

	return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Try all paths twice to counter link flapping. */
	req->mask = 0x8080;
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask, req->lpm);
	req->drc = 0;
	req->done = 0;
	req->cancel = 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}
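/*
 * Illustration (not part of the driver): path rotation for the initial
 * mask 0x8080 used by ccw_request_start(), assuming lpm = 0x88, i.e.
 * only the channel paths selected by bits 0x80 and 0x08 are available.
 * Each step applies lpm_adjust(mask >> 1, lpm) as in ccwreq_next_path();
 * cio_start() only sees the low byte, so every available path is tried
 * twice before the request finally fails:
 *
 *	req->mask	(u8) req->mask
 *	0x8080		0x80	first pass, path bit 0x80
 *	0x0808		0x08	first pass, path bit 0x08
 *	0x0080		0x80	second pass, path bit 0x80
 *	0x0008		0x08	second pass, path bit 0x08
 *	0x0000		-	no path left, stop with -EACCES
 */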
/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;
	return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;
	data.dev_id = cdev->private->dev_id;
	data.retries = req->retries;
	data.lpm = (u8) req->mask;
	data.status = (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}
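/*
 * Usage sketch (illustrative only; the field names are the ones referenced
 * in this file, while the surrounding setup and the helper names are
 * assumptions): an internal user such as the SENSE ID state machine fills
 * in cdev->private->req and starts it. Completion is reported through the
 * callback, optionally after filter() and check() have examined
 * intermediate results.
 *
 *	struct ccw_request *req = &cdev->private->req;
 *
 *	memset(req, 0, sizeof(*req));
 *	req->cp         = cp;            channel program to run
 *	req->timeout    = 2 * HZ;        per-start timeout (assumed value)
 *	req->maxretries = 5;             retries per path (assumed value)
 *	req->lpm        = sch->lpm;      paths to try (assumed source)
 *	req->check      = my_check;      optional, hypothetical helper
 *	req->callback   = my_callback;   required, hypothetical helper
 *	ccw_request_start(cdev);
 */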
/**
 * ccw_request_handler - interrupt handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct irb *irb = (struct irb *) __LC_IRB;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto out_next_path;
	case IO_STATUS_ERROR:
		goto out_restart;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto out_restart;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto out_restart;
	case -EACCES:
		goto out_next_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

out_next_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
out_restart:
	/* Restart. */
	ccwreq_do(cdev);
	return;
err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (!ccwreq_next_path(cdev)) {
		/* set the final return code for this request */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		goto err;
	return;

err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}
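/*
 * Worked example (illustrative only) of how the deferred return code set
 * by ccw_request_timeout() reaches the callback, assuming the timeout
 * expires while the last remaining path is in use:
 *
 *	ccw_request_timeout()
 *	  ccwreq_next_path() == 0	no path left to try
 *	  req->drc = -ETIME		latch the final return code
 *	  cio_clear()			terminate the running I/O
 *	ccw_request_handler()		clear function completed
 *	  ccwreq_status() == IO_KILLED	req->cancel is 0, so restart
 *	  ccwreq_do()			req->mask == 0, loop is skipped
 *	    ccwreq_stop(cdev, -EACCES)	rc != 0, rc != -ENODEV and drc is
 *					set, so the callback sees -ETIME
 */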