// SPDX-License-Identifier: GPL-2.0
/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
		      sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}
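
/*
 * Annotation added for illustration (not part of the original file):
 * the lpm values used above address the eight channel paths as single
 * bits, 0x80 for path position 0 down to 0x01 for path position 7;
 * "req->lpm >>= 1" in nop_callback() simply advances to the next path.
 * A minimal sketch of the mask-to-position mapping the driver obtains
 * from pathmask_to_pos():
 */
static inline int pgid_example_pathmask_to_pos(u8 mask)
{
	/* Illustrative only: 0x80 -> 0, 0x40 -> 1, ..., 0x01 -> 7. */
	return 8 - ffs(mask);
}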

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}
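
/*
 * Annotation added for illustration (not part of the original file):
 * the SPID function byte built in spid_do() and pgid_wipeout_start()
 * combines one operation code with an optional multipath flag. A
 * minimal sketch of that composition:
 */
static inline u8 pgid_example_spid_fn(int operational, int mpath)
{
	/* Illustrative only: establish on operational paths, else resign. */
	u8 fn = operational ? SPID_FUNC_ESTABLISH : SPID_FUNC_RESIGN;

	if (mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	return fn;
}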

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}

static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
}
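
/*
 * Annotation added for illustration (not part of the original file):
 * pgid_is_reset() and pgid_cmp() above skip the first byte of struct
 * pgid because it holds the SPID function control field (written in
 * spid_build_cp()) rather than identification data. A minimal sketch of
 * the same skip-byte-0 idiom on a raw buffer:
 */
static inline int pgid_example_tail_is_zero(const char *buf, size_t len)
{
	size_t i;

	/* Illustrative only: ignore byte 0, test bytes 1..len-1. */
	for (i = 1; i < len; i++)
		if (buf[i])
			return 0;
	return 1;
}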

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			/* Wipe out unknown path group state, then retry. */
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}
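
/*
 * Annotation added for clarity (summarizing the flow implemented above,
 * not part of the original file): path verification runs as a chain of
 * single-path requests,
 *
 *	verify_start()
 *	  - pgroup set:   snid_do() -> snid_callback() per path, then
 *	                  snid_done() -> spid_start() -> spid_do() ->
 *	                  spid_callback() per remaining path
 *	  - pgroup unset: nop_do() -> nop_callback() per path
 *
 * with pgid_wipeout_start() interposed whenever the PGID state is
 * unknown, and verify_done() reporting the final result in every case.
 */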

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}
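
/*
 * Annotation added for clarity (not part of the original file): the
 * steal-lock helpers below use the usual completion idiom -
 * ccw_device_stlck() keeps a struct stlck_data on its stack, starts the
 * request with it as callback context, and sleeps on ->done until
 * stlck_callback() records the result and calls complete().
 */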

struct stlck_data {
	struct completion done;
	int rc;
};

static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to stlck_callback
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}