// SPDX-License-Identifier: GPL-2.0
/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corp. 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"


/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest just
 *   has to be correct.
 */


static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};

static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;

	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;

	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
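	/*
	 * Only vendor and serial identify a storage server node; the ssid
	 * and unit address parts of the uid are tracked per lcu and per
	 * device (see _find_server and _find_lcu above).
	 */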
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}

static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * It returns 0 when the device has been connected to an already known
 * or a newly created lcu.
 * A negative return code indicates that something went wrong (e.g.
 * -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

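	/*
	 * The device is no longer on any of the lcu lists. If that leaves
	 * the lcu (and possibly the server) without any devices, the unused
	 * nodes are removed from the alias tree below.
	 */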
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
}

static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;

	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
}

static int
suborder_not_supported(struct dasd_ccw_req *cqr)
{
	char *sense;
	char reason;
	char msg_format;
	char msg_no;

	sense = dasd_get_sense(&cqr->irb);
	if (!sense)
		return 0;

	reason = sense[0];
	msg_format = (sense[7] & 0xF0);
	msg_no = (sense[7] & 0x0F);

	/* command reject, Format 0 MSG 4 - invalid parameter */
	if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
		return 1;

	return 0;
}

static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

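	/*
	 * The channel program consists of a PSF (Perform Subsystem Function)
	 * CCW requesting the unit address configuration, command-chained to
	 * an RSSD (Read Subsystem Data) CCW that reads the result into
	 * lcu->uac.
	 */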
	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
		if (rc && suborder_not_supported(cqr)) {
			/*
			 * The suborder is not supported: there is no point
			 * in retrying, but the request must not be leaked.
			 */
			dasd_kfree_request(cqr, cqr->memdev);
			return -EOPNOTSUPP;
		}
	} while (rc && (cqr->retries > 0));
	if (rc) {
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}

static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
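		/*
		 * The update failed or new work for the lcu arrived while we
		 * were reading (e.g. a summary unit check): keep the device
		 * reference and retry after a delay.
		 */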
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}

int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}

int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;

	/* nothing to do if already removed */
	if (!lcu)
		return 0;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

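/*
 * Select an alias device that can be used for the next I/O on behalf of the
 * given base device. Returns NULL if no suitable alias is available (no PAV,
 * a configuration update is pending, or the candidate alias is not less busy
 * than the base, is stopped, or is offline); the caller then simply uses the
 * base device itself.
 */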
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_pav_group *group = private->pavgroup;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	unsigned long flags;

	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}

/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for the termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

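/*
 * _stop_all_devices_on_lcu and _unstop_all_devices_on_lcu set and clear the
 * DASD_STOPPED_SU bit on every device known to the lcu. Both are called with
 * the lcu lock held; the ccwdev lock protects the stop bits of the
 * individual devices.
 */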
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
	}
}

static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
	}
}

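/*
 * Worker function for the actual summary unit check recovery: flush all
 * alias devices, reset the summary unit check on the device that reported
 * it, restart the base devices and schedule a fresh read of the unit
 * address configuration.
 */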
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}

void dasd_alias_handle_summary_unit_check(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  suc_work);
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu;
	unsigned long flags;

	lcu = private->lcu;
	if (!lcu) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device not ready to handle summary"
			      " unit check (no lcu structure)");
		goto out;
	}
	spin_lock_irqsave(&lcu->lock, flags);
	/* If this device is about to be removed just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device is in offline processing,"
			      " don't do summary unit check handling");
		goto out_unlock;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "previous instance of summary unit check worker"
			      " still pending");
		goto out_unlock;
	}
	_stop_all_devices_on_lcu(lcu);
	/* prepare for lcu_update */
	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	lcu->suc_data.reason = private->suc_reason;
	lcu->suc_data.device = device;
	dasd_get_device(device);
	if (!schedule_work(&lcu->suc_data.worker))
		dasd_put_device(device);
out_unlock:
	spin_unlock_irqrestore(&lcu->lock, flags);
out:
	clear_bit(DASD_FLAG_SUC, &device->flags);
	dasd_put_device(device);
}