/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	if (!req->singlepath) {
		req->mask = 0;
		goto out;
	}
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
out:
	return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->singlepath) {
		/* Try all paths twice to counter link flapping. */
		req->mask = 0x8080;
	} else
		req->mask = req->lpm;

	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask, req->lpm);
	req->drc = 0;
	req->done = 0;
	req->cancel = 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}
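
/*
 * Illustrative sketch, compiled out: one way an internal cio user could
 * drive ccw_request_start(), following the pattern of other users of this
 * interface (e.g. device_pgid.c). The function and callback names, the
 * timeout and retry values and the result handling are made up for the
 * example; the struct ccw_request fields used are those from io_sch.h.
 */
#if 0
static void example_nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* Final result: 0 on success, e.g. -EACCES or -ETIME on failure. */
	*(int *) data = rc;
}

static void example_start_nop(struct ccw_device *cdev, int *result)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	/* Channel program: a single NO-OPERATION CCW. */
	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->timeout = 5 * HZ;		/* per start I/O, in jiffies */
	req->maxretries = 3;		/* retries per path */
	req->lpm = LPM_ANYPATH;		/* consider all available paths */
	req->singlepath = 1;		/* use one path per start I/O */
	req->callback = example_nop_callback;
	req->data = result;
	ccw_request_start(cdev);
}
#endif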

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;
	enum uc_todo todo;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Ask the driver what to do */
		if (cdev->drv && cdev->drv->uc_handler) {
			todo = cdev->drv->uc_handler(cdev, lcirb);
			CIO_TRACE_EVENT(2, "uc_response");
			CIO_HEX_EVENT(2, &todo, sizeof(todo));
			switch (todo) {
			case UC_TODO_RETRY:
				return IO_STATUS_ERROR;
			case UC_TODO_RETRY_ON_NEW_PATH:
				return IO_PATH_ERROR;
			case UC_TODO_STOP:
				return IO_REJECTED;
			default:
				return IO_STATUS_ERROR;
			}
		}
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;
	return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;
	data.dev_id = cdev->private->dev_id;
	data.retries = req->retries;
	data.lpm = (u8) req->mask;
	data.status = (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}
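
/*
 * Illustrative sketch, compiled out: a request initiator may supply a
 * check callback; ccw_request_handler() below maps its return value to
 * 0 (request done), -EAGAIN (restart I/O), -EACCES (try the next path)
 * or any other value (fail the request). The response structure, format
 * constant and function name here are hypothetical.
 */
#if 0
static int example_check(struct ccw_device *cdev, void *data)
{
	struct example_response *rsp = data;	/* filled in by the channel program */

	if (rsp->format == 0xff)
		return -EAGAIN;	/* incomplete data - restart I/O */
	if (rsp->format != EXAMPLE_FORMAT_VALID)
		return -EACCES;	/* unusable data - retry on another path */
	return 0;		/* results are final */
}
#endif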

/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct irb *irb = (struct irb *)&S390_lowcore.irb;
	struct ccw_request *req = &cdev->private->req;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto out_next_path;
	case IO_STATUS_ERROR:
		goto out_restart;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto out_restart;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto out_restart;
	case -EACCES:
		goto out_next_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

out_next_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
out_restart:
	/* Restart. */
	ccwreq_do(cdev);
	return;
err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (!ccwreq_next_path(cdev)) {
		/* Set the final return code for this request. */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		goto err;
	return;

err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}
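
/*
 * Illustrative sketch, compiled out: cancelling a running request, e.g.
 * before taking a device offline. ccw_request_cancel() returns non-zero
 * if the request had already finished; otherwise the subchannel is
 * cleared and the final callback is typically invoked with -EIO via the
 * IO_KILLED case in ccw_request_handler(). The wrapper name is
 * hypothetical.
 */
#if 0
static void example_cancel(struct ccw_device *cdev)
{
	if (ccw_request_cancel(cdev))
		return; /* already finished - callback has run */
	/* Wait for the final callback triggered by the clear interrupt. */
}
#endif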