xref: /openbmc/linux/drivers/s390/block/dasd_alias.c (revision 565d76cb)
1 /*
2  * PAV alias management for the DASD ECKD discipline
3  *
4  * Copyright IBM Corporation, 2007
5  * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "dasd-eckd"
9 
10 #include <linux/list.h>
11 #include <linux/slab.h>
12 #include <asm/ebcdic.h>
13 #include "dasd_int.h"
14 #include "dasd_eckd.h"
15 
16 #ifdef PRINTK_HEADER
17 #undef PRINTK_HEADER
18 #endif				/* PRINTK_HEADER */
19 #define PRINTK_HEADER "dasd(eckd):"
20 
21 
22 /*
23  * General concept of alias management:
24  * - PAV and DASD alias management is specific to the eckd discipline.
25  * - A device is connected to an lcu as long as the device exists.
26  *   dasd_alias_make_device_known_to_lcu will be called when the
27  *   device is checked by the eckd discipline and
28  *   dasd_alias_disconnect_device_from_lcu will be called
29  *   before the device is deleted.
30  * - The dasd_alias_add_device / dasd_alias_remove_device
31  *   functions mark the point when a device is 'ready for service'.
32  * - A summary unit check is a rare occasion, but it is mandatory to
33  *   support it. It requires some complex recovery actions before the
34  *   devices can be used again (see dasd_alias_handle_summary_unit_check).
35  * - dasd_alias_get_start_dev will find an alias device that can be used
36  *   instead of the base device and does some (very simple) load balancing.
37  *   This is the function that gets called for each I/O, so any effort to
38  *   improve something should make this function faster or better; the
39  *   rest just has to be correct.
40  */
41 
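/*
 * Rough per-device call sequence as seen from the eckd discipline
 * (illustrative sketch only; see dasd_eckd.c for the actual call sites):
 *
 *	dasd_alias_make_device_known_to_lcu(device);
 *	...the first device on the lcu configures it on the storage server,
 *	   then calls dasd_alias_lcu_setup_complete(device); all other
 *	   devices call dasd_alias_wait_for_lcu_setup(device)...
 *	dasd_alias_add_device(device);	now the device is ready for service
 *	...for each I/O, dasd_alias_get_start_dev() may pick an alias...
 *	dasd_alias_remove_device(device);
 *	dasd_alias_disconnect_device_from_lcu(device);
 */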
42 
43 static void summary_unit_check_handling_work(struct work_struct *);
44 static void lcu_update_work(struct work_struct *);
45 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
46 
47 static struct alias_root aliastree = {
48 	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
49 	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
50 };
51 
52 static struct alias_server *_find_server(struct dasd_uid *uid)
53 {
54 	struct alias_server *pos;
55 	list_for_each_entry(pos, &aliastree.serverlist, server) {
56 		if (!strncmp(pos->uid.vendor, uid->vendor,
57 			     sizeof(uid->vendor))
58 		    && !strncmp(pos->uid.serial, uid->serial,
59 				sizeof(uid->serial)))
60 			return pos;
61 	}
62 	return NULL;
63 }
64 
65 static struct alias_lcu *_find_lcu(struct alias_server *server,
66 				   struct dasd_uid *uid)
67 {
68 	struct alias_lcu *pos;
69 	list_for_each_entry(pos, &server->lculist, lcu) {
70 		if (pos->uid.ssid == uid->ssid)
71 			return pos;
72 	}
73 	return NULL;
74 }
75 
76 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
77 					   struct dasd_uid *uid)
78 {
79 	struct alias_pav_group *pos;
80 	__u8 search_unit_addr;
81 
82 	/* for hyper pav there is only one group */
83 	if (lcu->pav == HYPER_PAV) {
84 		if (list_empty(&lcu->grouplist))
85 			return NULL;
86 		else
87 			return list_first_entry(&lcu->grouplist,
88 						struct alias_pav_group, group);
89 	}
90 
91 	/* for base pav we have to find the group that matches the base */
92 	if (uid->type == UA_BASE_DEVICE)
93 		search_unit_addr = uid->real_unit_addr;
94 	else
95 		search_unit_addr = uid->base_unit_addr;
96 	list_for_each_entry(pos, &lcu->grouplist, group) {
97 		if (pos->uid.base_unit_addr == search_unit_addr &&
98 		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
99 			return pos;
100 	}
101 	return NULL;
102 }
103 
104 static struct alias_server *_allocate_server(struct dasd_uid *uid)
105 {
106 	struct alias_server *server;
107 
108 	server = kzalloc(sizeof(*server), GFP_KERNEL);
109 	if (!server)
110 		return ERR_PTR(-ENOMEM);
111 	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
112 	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
113 	INIT_LIST_HEAD(&server->server);
114 	INIT_LIST_HEAD(&server->lculist);
115 	return server;
116 }
117 
118 static void _free_server(struct alias_server *server)
119 {
120 	kfree(server);
121 }
122 
123 static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
124 {
125 	struct alias_lcu *lcu;
126 
127 	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
128 	if (!lcu)
129 		return ERR_PTR(-ENOMEM);
130 	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
131 	if (!lcu->uac)
132 		goto out_err1;
133 	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
134 	if (!lcu->rsu_cqr)
135 		goto out_err2;
136 	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
137 				       GFP_KERNEL | GFP_DMA);
138 	if (!lcu->rsu_cqr->cpaddr)
139 		goto out_err3;
140 	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
141 	if (!lcu->rsu_cqr->data)
142 		goto out_err4;
143 
144 	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
145 	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
146 	lcu->uid.ssid = uid->ssid;
147 	lcu->pav = NO_PAV;
148 	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
149 	INIT_LIST_HEAD(&lcu->lcu);
150 	INIT_LIST_HEAD(&lcu->inactive_devices);
151 	INIT_LIST_HEAD(&lcu->active_devices);
152 	INIT_LIST_HEAD(&lcu->grouplist);
153 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
154 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
155 	spin_lock_init(&lcu->lock);
156 	init_completion(&lcu->lcu_setup);
157 	return lcu;
158 
159 out_err4:
160 	kfree(lcu->rsu_cqr->cpaddr);
161 out_err3:
162 	kfree(lcu->rsu_cqr);
163 out_err2:
164 	kfree(lcu->uac);
165 out_err1:
166 	kfree(lcu);
167 	return ERR_PTR(-ENOMEM);
168 }
169 
170 static void _free_lcu(struct alias_lcu *lcu)
171 {
172 	kfree(lcu->rsu_cqr->data);
173 	kfree(lcu->rsu_cqr->cpaddr);
174 	kfree(lcu->rsu_cqr);
175 	kfree(lcu->uac);
176 	kfree(lcu);
177 }
178 
179 /*
180  * This is the function that will allocate all the server and lcu data,
181  * so this function must be called first for a new device.
182  * If the return value is 1, the lcu was already known before; if it
183  * is 0, this is a new lcu.
184  * Negative return code indicates that something went wrong (e.g. -ENOMEM)
185  */
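/*
 * Sketch of how a caller might use the return value (illustrative only):
 *
 *	rc = dasd_alias_make_device_known_to_lcu(device);
 *	if (rc < 0)
 *		return rc;	(e.g. -ENOMEM)
 *	rc == 0 means a new lcu, so this device has to perform the lcu setup;
 *	rc == 1 means the lcu was already known before.
 */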
186 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
187 {
188 	struct dasd_eckd_private *private;
189 	unsigned long flags;
190 	struct alias_server *server, *newserver;
191 	struct alias_lcu *lcu, *newlcu;
192 	int is_lcu_known;
193 	struct dasd_uid uid;
194 
195 	private = (struct dasd_eckd_private *) device->private;
196 
197 	device->discipline->get_uid(device, &uid);
198 	spin_lock_irqsave(&aliastree.lock, flags);
199 	is_lcu_known = 1;
200 	server = _find_server(&uid);
201 	if (!server) {
202 		spin_unlock_irqrestore(&aliastree.lock, flags);
203 		newserver = _allocate_server(&uid);
204 		if (IS_ERR(newserver))
205 			return PTR_ERR(newserver);
206 		spin_lock_irqsave(&aliastree.lock, flags);
207 		server = _find_server(&uid);
208 		if (!server) {
209 			list_add(&newserver->server, &aliastree.serverlist);
210 			server = newserver;
211 			is_lcu_known = 0;
212 		} else {
213 			/* someone was faster */
214 			_free_server(newserver);
215 		}
216 	}
217 
218 	lcu = _find_lcu(server, &uid);
219 	if (!lcu) {
220 		spin_unlock_irqrestore(&aliastree.lock, flags);
221 		newlcu = _allocate_lcu(&uid);
222 		if (IS_ERR(newlcu))
223 			return PTR_ERR(newlcu);
224 		spin_lock_irqsave(&aliastree.lock, flags);
225 		lcu = _find_lcu(server, &uid);
226 		if (!lcu) {
227 			list_add(&newlcu->lcu, &server->lculist);
228 			lcu = newlcu;
229 			is_lcu_known = 0;
230 		} else {
231 			/* someone was faster */
232 			_free_lcu(newlcu);
233 		}
234 		is_lcu_known = 0;
235 	}
236 	spin_lock(&lcu->lock);
237 	list_add(&device->alias_list, &lcu->inactive_devices);
238 	private->lcu = lcu;
239 	spin_unlock(&lcu->lock);
240 	spin_unlock_irqrestore(&aliastree.lock, flags);
241 
242 	return is_lcu_known;
243 }
244 
245 /*
246  * The first device to be registered on an LCU will have to do
247  * some additional setup steps to configure that LCU on the
248  * storage server. All further devices should delay their
249  * initialization until the first device is done.
250  * To synchronize this work, the first device will call
251  * dasd_alias_lcu_setup_complete when it is done, and all
252  * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
253  */
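/*
 * Illustrative sketch of that synchronization (not the verbatim caller):
 *
 *	if the lcu is new (make_device_known_to_lcu returned 0):
 *		configure the lcu on the storage server, then call
 *		dasd_alias_lcu_setup_complete(device);
 *	otherwise:
 *		dasd_alias_wait_for_lcu_setup(device);
 */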
254 void dasd_alias_lcu_setup_complete(struct dasd_device *device)
255 {
256 	struct dasd_eckd_private *private;
257 	unsigned long flags;
258 	struct alias_server *server;
259 	struct alias_lcu *lcu;
260 	struct dasd_uid uid;
261 
262 	private = (struct dasd_eckd_private *) device->private;
263 	device->discipline->get_uid(device, &uid);
264 	lcu = NULL;
265 	spin_lock_irqsave(&aliastree.lock, flags);
266 	server = _find_server(&uid);
267 	if (server)
268 		lcu = _find_lcu(server, &uid);
269 	spin_unlock_irqrestore(&aliastree.lock, flags);
270 	if (!lcu) {
271 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
272 				"could not find lcu for %04x %02x",
273 				uid.ssid, uid.real_unit_addr);
274 		WARN_ON(1);
275 		return;
276 	}
277 	complete_all(&lcu->lcu_setup);
278 }
279 
280 void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
281 {
282 	struct dasd_eckd_private *private;
283 	unsigned long flags;
284 	struct alias_server *server;
285 	struct alias_lcu *lcu;
286 	struct dasd_uid uid;
287 
288 	private = (struct dasd_eckd_private *) device->private;
289 	device->discipline->get_uid(device, &uid);
290 	lcu = NULL;
291 	spin_lock_irqsave(&aliastree.lock, flags);
292 	server = _find_server(&uid);
293 	if (server)
294 		lcu = _find_lcu(server, &uid);
295 	spin_unlock_irqrestore(&aliastree.lock, flags);
296 	if (!lcu) {
297 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
298 				"could not find lcu for %04x %02x",
299 				uid.ssid, uid.real_unit_addr);
300 		WARN_ON(1);
301 		return;
302 	}
303 	wait_for_completion(&lcu->lcu_setup);
304 }
305 
306 /*
307  * This function removes a device from the scope of alias management.
308  * The complicated part is to make sure that it is not in use by
309  * any of the workers. If necessary, cancel the work.
310  */
311 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
312 {
313 	struct dasd_eckd_private *private;
314 	unsigned long flags;
315 	struct alias_lcu *lcu;
316 	struct alias_server *server;
317 	int was_pending;
318 	struct dasd_uid uid;
319 
320 	private = (struct dasd_eckd_private *) device->private;
321 	lcu = private->lcu;
322 	/* nothing to do if already disconnected */
323 	if (!lcu)
324 		return;
325 	device->discipline->get_uid(device, &uid);
326 	spin_lock_irqsave(&lcu->lock, flags);
327 	list_del_init(&device->alias_list);
328 	/* make sure that the workers don't use this device */
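	/*
	 * Note that the lcu lock is dropped before cancel_work_sync: the
	 * workers take lcu->lock themselves, so waiting for them with the
	 * lock held would deadlock. After re-acquiring the lock, re-check
	 * the device pointer since it may have changed in the meantime.
	 */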
329 	if (device == lcu->suc_data.device) {
330 		spin_unlock_irqrestore(&lcu->lock, flags);
331 		cancel_work_sync(&lcu->suc_data.worker);
332 		spin_lock_irqsave(&lcu->lock, flags);
333 		if (device == lcu->suc_data.device)
334 			lcu->suc_data.device = NULL;
335 	}
336 	was_pending = 0;
337 	if (device == lcu->ruac_data.device) {
338 		spin_unlock_irqrestore(&lcu->lock, flags);
339 		was_pending = 1;
340 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
341 		spin_lock_irqsave(&lcu->lock, flags);
342 		if (device == lcu->ruac_data.device)
343 			lcu->ruac_data.device = NULL;
344 	}
345 	private->lcu = NULL;
346 	spin_unlock_irqrestore(&lcu->lock, flags);
347 
348 	spin_lock_irqsave(&aliastree.lock, flags);
349 	spin_lock(&lcu->lock);
350 	if (list_empty(&lcu->grouplist) &&
351 	    list_empty(&lcu->active_devices) &&
352 	    list_empty(&lcu->inactive_devices)) {
353 		list_del(&lcu->lcu);
354 		spin_unlock(&lcu->lock);
355 		_free_lcu(lcu);
356 		lcu = NULL;
357 	} else {
358 		if (was_pending)
359 			_schedule_lcu_update(lcu, NULL);
360 		spin_unlock(&lcu->lock);
361 	}
362 	server = _find_server(&uid);
363 	if (server && list_empty(&server->lculist)) {
364 		list_del(&server->server);
365 		_free_server(server);
366 	}
367 	spin_unlock_irqrestore(&aliastree.lock, flags);
368 }
369 
370 /*
371  * This function assumes that the unit address configuration stored
372  * in the lcu is up to date and will update the device uid before
373  * adding it to a pav group.
374  */
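/*
 * Note: this is called with spinlocks held (the lcu lock and a cdev lock),
 * which is why a new pav group is allocated with GFP_ATOMIC below.
 */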
375 
376 static int _add_device_to_lcu(struct alias_lcu *lcu,
377 			      struct dasd_device *device,
378 			      struct dasd_device *pos)
379 {
380 
381 	struct dasd_eckd_private *private;
382 	struct alias_pav_group *group;
383 	struct dasd_uid uid;
384 	unsigned long flags;
385 
386 	private = (struct dasd_eckd_private *) device->private;
387 
388 	/* only lock if not already locked */
389 	if (device != pos)
390 		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
391 					 CDEV_NESTED_SECOND);
392 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
393 	private->uid.base_unit_addr =
394 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
395 	uid = private->uid;
396 
397 	if (device != pos)
398 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
399 
400 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
401 	if (lcu->pav == NO_PAV) {
402 		list_move(&device->alias_list, &lcu->active_devices);
403 		return 0;
404 	}
405 
406 	group = _find_group(lcu, &uid);
407 	if (!group) {
408 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
409 		if (!group)
410 			return -ENOMEM;
411 		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
412 		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
413 		group->uid.ssid = uid.ssid;
414 		if (uid.type == UA_BASE_DEVICE)
415 			group->uid.base_unit_addr = uid.real_unit_addr;
416 		else
417 			group->uid.base_unit_addr = uid.base_unit_addr;
418 		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
419 		INIT_LIST_HEAD(&group->group);
420 		INIT_LIST_HEAD(&group->baselist);
421 		INIT_LIST_HEAD(&group->aliaslist);
422 		list_add(&group->group, &lcu->grouplist);
423 	}
424 	if (uid.type == UA_BASE_DEVICE)
425 		list_move(&device->alias_list, &group->baselist);
426 	else
427 		list_move(&device->alias_list, &group->aliaslist);
428 	private->pavgroup = group;
429 	return 0;
430 }
431 
432 static void _remove_device_from_lcu(struct alias_lcu *lcu,
433 				    struct dasd_device *device)
434 {
435 	struct dasd_eckd_private *private;
436 	struct alias_pav_group *group;
437 
438 	private = (struct dasd_eckd_private *) device->private;
439 	list_move(&device->alias_list, &lcu->inactive_devices);
440 	group = private->pavgroup;
441 	if (!group)
442 		return;
443 	private->pavgroup = NULL;
444 	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
445 		list_del(&group->group);
446 		kfree(group);
447 		return;
448 	}
449 	if (group->next == device)
450 		group->next = NULL;
451 }
452 
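/*
 * Build and run a single channel program to read the unit address
 * configuration: a Perform Subsystem Function CCW carrying a Prepare for
 * Read Subsystem Data order (suborder 0x0e, read unit address
 * configuration), command-chained to a Read Subsystem Data CCW that
 * returns the configuration into lcu->uac.
 */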
453 static int read_unit_address_configuration(struct dasd_device *device,
454 					   struct alias_lcu *lcu)
455 {
456 	struct dasd_psf_prssd_data *prssdp;
457 	struct dasd_ccw_req *cqr;
458 	struct ccw1 *ccw;
459 	int rc;
460 	unsigned long flags;
461 
462 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
463 				   (sizeof(struct dasd_psf_prssd_data)),
464 				   device);
465 	if (IS_ERR(cqr))
466 		return PTR_ERR(cqr);
467 	cqr->startdev = device;
468 	cqr->memdev = device;
469 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
470 	cqr->retries = 10;
471 	cqr->expires = 20 * HZ;
472 
473 	/* Prepare for Read Subsystem Data */
474 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
475 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
476 	prssdp->order = PSF_ORDER_PRSSD;
477 	prssdp->suborder = 0x0e;	/* Read unit address configuration */
478 	/* all other bytes of prssdp must be zero */
479 
480 	ccw = cqr->cpaddr;
481 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
482 	ccw->count = sizeof(struct dasd_psf_prssd_data);
483 	ccw->flags |= CCW_FLAG_CC;
484 	ccw->cda = (__u32)(addr_t) prssdp;
485 
486 	/* Read Subsystem Data - unit address configuration */
487 	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
488 
489 	ccw++;
490 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
491 	ccw->count = sizeof(*(lcu->uac));
492 	ccw->cda = (__u32)(addr_t) lcu->uac;
493 
494 	cqr->buildclk = get_clock();
495 	cqr->status = DASD_CQR_FILLED;
496 
497 	/* need to unset flag here to detect race with summary unit check */
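	/*
	 * If a summary unit check is handled while this request is running,
	 * its handler sets NEED_UAC_UPDATE again; lcu_update_work checks the
	 * flag after this function returns and will schedule another update.
	 */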
498 	spin_lock_irqsave(&lcu->lock, flags);
499 	lcu->flags &= ~NEED_UAC_UPDATE;
500 	spin_unlock_irqrestore(&lcu->lock, flags);
501 
502 	do {
503 		rc = dasd_sleep_on(cqr);
504 	} while (rc && (cqr->retries > 0));
505 	if (rc) {
506 		spin_lock_irqsave(&lcu->lock, flags);
507 		lcu->flags |= NEED_UAC_UPDATE;
508 		spin_unlock_irqrestore(&lcu->lock, flags);
509 	}
510 	dasd_kfree_request(cqr, cqr->memdev);
511 	return rc;
512 }
513 
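/*
 * Re-read the unit address configuration of an lcu: dissolve all pav
 * groups, read the current configuration from the storage server, derive
 * the pav mode from it, and sort the collected devices back into groups.
 */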
514 static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
515 {
516 	unsigned long flags;
517 	struct alias_pav_group *pavgroup, *tempgroup;
518 	struct dasd_device *device, *tempdev;
519 	int i, rc;
520 	struct dasd_eckd_private *private;
521 
522 	spin_lock_irqsave(&lcu->lock, flags);
523 	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
524 		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
525 					 alias_list) {
526 			list_move(&device->alias_list, &lcu->active_devices);
527 			private = (struct dasd_eckd_private *) device->private;
528 			private->pavgroup = NULL;
529 		}
530 		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
531 					 alias_list) {
532 			list_move(&device->alias_list, &lcu->active_devices);
533 			private = (struct dasd_eckd_private *) device->private;
534 			private->pavgroup = NULL;
535 		}
536 		list_del(&pavgroup->group);
537 		kfree(pavgroup);
538 	}
539 	spin_unlock_irqrestore(&lcu->lock, flags);
540 
541 	rc = read_unit_address_configuration(refdev, lcu);
542 	if (rc)
543 		return rc;
544 
545 	/* need to take cdev lock before lcu lock */
546 	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
547 				 CDEV_NESTED_FIRST);
548 	spin_lock(&lcu->lock);
549 	lcu->pav = NO_PAV;
550 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
551 		switch (lcu->uac->unit[i].ua_type) {
552 		case UA_BASE_PAV_ALIAS:
553 			lcu->pav = BASE_PAV;
554 			break;
555 		case UA_HYPER_PAV_ALIAS:
556 			lcu->pav = HYPER_PAV;
557 			break;
558 		}
559 		if (lcu->pav != NO_PAV)
560 			break;
561 	}
562 
563 	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
564 				 alias_list) {
565 		_add_device_to_lcu(lcu, device, refdev);
566 	}
567 	spin_unlock(&lcu->lock);
568 	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
569 	return 0;
570 }
571 
572 static void lcu_update_work(struct work_struct *work)
573 {
574 	struct alias_lcu *lcu;
575 	struct read_uac_work_data *ruac_data;
576 	struct dasd_device *device;
577 	unsigned long flags;
578 	int rc;
579 
580 	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
581 	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
582 	device = ruac_data->device;
583 	rc = _lcu_update(device, lcu);
584 	/*
585 	 * Need to check the flags again, as there could have been another
586 	 * prepare_update or a new device while we were still processing
587 	 * the data.
588 	 */
589 	spin_lock_irqsave(&lcu->lock, flags);
590 	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
591 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
592 			    " alias data in lcu (rc = %d), retry later", rc);
593 		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
594 	} else {
595 		lcu->ruac_data.device = NULL;
596 		lcu->flags &= ~UPDATE_PENDING;
597 	}
598 	spin_unlock_irqrestore(&lcu->lock, flags);
599 }
600 
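/*
 * Find a device that can be used to read the unit address configuration:
 * prefer the given device (if it is still connected to the lcu), then any
 * device from the first pav group, then any device on the active list.
 */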
601 static int _schedule_lcu_update(struct alias_lcu *lcu,
602 				struct dasd_device *device)
603 {
604 	struct dasd_device *usedev = NULL;
605 	struct alias_pav_group *group;
606 
607 	lcu->flags |= NEED_UAC_UPDATE;
608 	if (lcu->ruac_data.device) {
609 		/* already scheduled or running */
610 		return 0;
611 	}
612 	if (device && !list_empty(&device->alias_list))
613 		usedev = device;
614 
615 	if (!usedev && !list_empty(&lcu->grouplist)) {
616 		group = list_first_entry(&lcu->grouplist,
617 					 struct alias_pav_group, group);
618 		if (!list_empty(&group->baselist))
619 			usedev = list_first_entry(&group->baselist,
620 						  struct dasd_device,
621 						  alias_list);
622 		else if (!list_empty(&group->aliaslist))
623 			usedev = list_first_entry(&group->aliaslist,
624 						  struct dasd_device,
625 						  alias_list);
626 	}
627 	if (!usedev && !list_empty(&lcu->active_devices)) {
628 		usedev = list_first_entry(&lcu->active_devices,
629 					  struct dasd_device, alias_list);
630 	}
631 	/*
632 	 * If we haven't found a proper device yet, give up for now; the next
633 	 * device that is set active will trigger an lcu update.
634 	 */
635 	if (!usedev)
636 		return -EINVAL;
637 	lcu->ruac_data.device = usedev;
638 	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
639 	return 0;
640 }
641 
642 int dasd_alias_add_device(struct dasd_device *device)
643 {
644 	struct dasd_eckd_private *private;
645 	struct alias_lcu *lcu;
646 	unsigned long flags;
647 	int rc;
648 
649 	private = (struct dasd_eckd_private *) device->private;
650 	lcu = private->lcu;
651 	rc = 0;
652 
653 	/* need to take cdev lock before lcu lock */
654 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
655 	spin_lock(&lcu->lock);
656 	if (!(lcu->flags & UPDATE_PENDING)) {
657 		rc = _add_device_to_lcu(lcu, device, device);
658 		if (rc)
659 			lcu->flags |= UPDATE_PENDING;
660 	}
661 	if (lcu->flags & UPDATE_PENDING) {
662 		list_move(&device->alias_list, &lcu->active_devices);
663 		_schedule_lcu_update(lcu, device);
664 	}
665 	spin_unlock(&lcu->lock);
666 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
667 	return rc;
668 }
669 
670 int dasd_alias_update_add_device(struct dasd_device *device)
671 {
672 	struct dasd_eckd_private *private;
673 	private = (struct dasd_eckd_private *) device->private;
674 	private->lcu->flags |= UPDATE_PENDING;
675 	return dasd_alias_add_device(device);
676 }
677 
678 int dasd_alias_remove_device(struct dasd_device *device)
679 {
680 	struct dasd_eckd_private *private;
681 	struct alias_lcu *lcu;
682 	unsigned long flags;
683 
684 	private = (struct dasd_eckd_private *) device->private;
685 	lcu = private->lcu;
686 	/* nothing to do if already removed */
687 	if (!lcu)
688 		return 0;
689 	spin_lock_irqsave(&lcu->lock, flags);
690 	_remove_device_from_lcu(lcu, device);
691 	spin_unlock_irqrestore(&lcu->lock, flags);
692 	return 0;
693 }
694 
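/*
 * Very simple load balancing: group->next cycles round-robin through the
 * aliases of the base device's pav group; an alias is only returned if it
 * currently appears less loaded than the base device (alias_priv->count <
 * private->count) and is not stopped.
 */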
695 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
696 {
697 
698 	struct dasd_device *alias_device;
699 	struct alias_pav_group *group;
700 	struct alias_lcu *lcu;
701 	struct dasd_eckd_private *private, *alias_priv;
702 	unsigned long flags;
703 
704 	private = (struct dasd_eckd_private *) base_device->private;
705 	group = private->pavgroup;
706 	lcu = private->lcu;
707 	if (!group || !lcu)
708 		return NULL;
709 	if (lcu->pav == NO_PAV ||
710 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
711 		return NULL;
712 
713 	spin_lock_irqsave(&lcu->lock, flags);
714 	alias_device = group->next;
715 	if (!alias_device) {
716 		if (list_empty(&group->aliaslist)) {
717 			spin_unlock_irqrestore(&lcu->lock, flags);
718 			return NULL;
719 		} else {
720 			alias_device = list_first_entry(&group->aliaslist,
721 							struct dasd_device,
722 							alias_list);
723 		}
724 	}
725 	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
726 		group->next = list_first_entry(&group->aliaslist,
727 					       struct dasd_device, alias_list);
728 	else
729 		group->next = list_first_entry(&alias_device->alias_list,
730 					       struct dasd_device, alias_list);
731 	spin_unlock_irqrestore(&lcu->lock, flags);
732 	alias_priv = (struct dasd_eckd_private *) alias_device->private;
733 	if ((alias_priv->count < private->count) && !alias_device->stopped)
734 		return alias_device;
735 	else
736 		return NULL;
737 }
738 
739 /*
740  * Summary unit check handling depends on the way alias devices
741  * are handled, so it is done here rather than in dasd_eckd.c
742  */
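/*
 * reset_summary_unit_check issues a Reset Summary Unit Check CCW
 * (DASD_ECKD_CCW_RSCK) with the reason code of the original unit check in
 * the first byte of its 16 byte data area; it uses the rsu_cqr that was
 * preallocated in _allocate_lcu, so no memory allocation is needed here.
 */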
743 static int reset_summary_unit_check(struct alias_lcu *lcu,
744 				    struct dasd_device *device,
745 				    char reason)
746 {
747 	struct dasd_ccw_req *cqr;
748 	int rc = 0;
749 	struct ccw1 *ccw;
750 
751 	cqr = lcu->rsu_cqr;
752 	strncpy((char *) &cqr->magic, "ECKD", 4);
753 	ASCEBC((char *) &cqr->magic, 4);
754 	ccw = cqr->cpaddr;
755 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
756 	ccw->flags = 0;
757 	ccw->count = 16;
758 	ccw->cda = (__u32)(addr_t) cqr->data;
759 	((char *)cqr->data)[0] = reason;
760 
761 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
762 	cqr->retries = 255;	/* set retry counter to enable basic ERP */
763 	cqr->startdev = device;
764 	cqr->memdev = device;
765 	cqr->block = NULL;
766 	cqr->expires = 5 * HZ;
767 	cqr->buildclk = get_clock();
768 	cqr->status = DASD_CQR_FILLED;
769 
770 	rc = dasd_sleep_on_immediatly(cqr);
771 	return rc;
772 }
773 
774 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
775 {
776 	struct alias_pav_group *pavgroup;
777 	struct dasd_device *device;
778 	struct dasd_eckd_private *private;
779 	unsigned long flags;
780 
781 	/* active and inactive lists can contain alias as well as base devices */
782 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
783 		private = (struct dasd_eckd_private *) device->private;
784 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
785 		if (private->uid.type != UA_BASE_DEVICE) {
786 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
787 					       flags);
788 			continue;
789 		}
790 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
791 		dasd_schedule_block_bh(device->block);
792 		dasd_schedule_device_bh(device);
793 	}
794 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
795 		private = (struct dasd_eckd_private *) device->private;
796 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
797 		if (private->uid.type != UA_BASE_DEVICE) {
798 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
799 					       flags);
800 			continue;
801 		}
802 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
803 		dasd_schedule_block_bh(device->block);
804 		dasd_schedule_device_bh(device);
805 	}
806 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
807 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
808 			dasd_schedule_block_bh(device->block);
809 			dasd_schedule_device_bh(device);
810 		}
811 	}
812 }
813 
814 static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
815 {
816 	struct alias_pav_group *pavgroup;
817 	struct dasd_device *device, *temp;
818 	struct dasd_eckd_private *private;
819 	int rc;
820 	unsigned long flags;
821 	LIST_HEAD(active);
822 
823 	/*
824 	 * The problem here is that dasd_flush_device_queue may have to wait
825 	 * for a request to terminate. We can't keep
826 	 * the lcu lock during that time, so we must assume that
827 	 * the lists may have changed.
828 	 * Idea: first gather all active alias devices in a separate list,
829 	 * then flush the first element of this list unlocked, and afterwards
830 	 * check if it is still on the list before moving it to the
831 	 * active_devices list.
832 	 */
833 
834 	spin_lock_irqsave(&lcu->lock, flags);
835 	list_for_each_entry_safe(device, temp, &lcu->active_devices,
836 				 alias_list) {
837 		private = (struct dasd_eckd_private *) device->private;
838 		if (private->uid.type == UA_BASE_DEVICE)
839 			continue;
840 		list_move(&device->alias_list, &active);
841 	}
842 
843 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
844 		list_splice_init(&pavgroup->aliaslist, &active);
845 	}
846 	while (!list_empty(&active)) {
847 		device = list_first_entry(&active, struct dasd_device,
848 					  alias_list);
849 		spin_unlock_irqrestore(&lcu->lock, flags);
850 		rc = dasd_flush_device_queue(device);
851 		spin_lock_irqsave(&lcu->lock, flags);
852 		/*
853 		 * only move device around if it wasn't moved away while we
854 		 * were waiting for the flush
855 		 */
856 		if (device == list_first_entry(&active,
857 					       struct dasd_device, alias_list))
858 			list_move(&device->alias_list, &lcu->active_devices);
859 	}
860 	spin_unlock_irqrestore(&lcu->lock, flags);
861 }
862 
863 static void __stop_device_on_lcu(struct dasd_device *device,
864 				 struct dasd_device *pos)
865 {
866 	/* If pos == device then device is already locked! */
867 	if (pos == device) {
868 		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
869 		return;
870 	}
871 	spin_lock(get_ccwdev_lock(pos->cdev));
872 	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
873 	spin_unlock(get_ccwdev_lock(pos->cdev));
874 }
875 
876 /*
877  * This function is called in interrupt context, so the
878  * cdev lock for device is already locked!
879  */
880 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
881 				     struct dasd_device *device)
882 {
883 	struct alias_pav_group *pavgroup;
884 	struct dasd_device *pos;
885 
886 	list_for_each_entry(pos, &lcu->active_devices, alias_list)
887 		__stop_device_on_lcu(device, pos);
888 	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
889 		__stop_device_on_lcu(device, pos);
890 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
891 		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
892 			__stop_device_on_lcu(device, pos);
893 		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
894 			__stop_device_on_lcu(device, pos);
895 	}
896 }
897 
898 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
899 {
900 	struct alias_pav_group *pavgroup;
901 	struct dasd_device *device;
902 	unsigned long flags;
903 
904 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
905 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
906 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
907 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
908 	}
909 
910 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
911 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
912 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
913 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
914 	}
915 
916 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
917 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
918 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
919 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
920 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
921 					       flags);
922 		}
923 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
924 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
925 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
926 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
927 					       flags);
928 		}
929 	}
930 }
931 
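/*
 * Worker function for summary unit check handling: flush the alias
 * devices, reset the summary unit check on the affected device, restart
 * the stopped devices and schedule an update of the alias configuration.
 */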
932 static void summary_unit_check_handling_work(struct work_struct *work)
933 {
934 	struct alias_lcu *lcu;
935 	struct summary_unit_check_work_data *suc_data;
936 	unsigned long flags;
937 	struct dasd_device *device;
938 
939 	suc_data = container_of(work, struct summary_unit_check_work_data,
940 				worker);
941 	lcu = container_of(suc_data, struct alias_lcu, suc_data);
942 	device = suc_data->device;
943 
944 	/* 1. flush alias devices */
945 	flush_all_alias_devices_on_lcu(lcu);
946 
947 	/* 2. reset summary unit check */
948 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
949 	dasd_device_remove_stop_bits(device,
950 				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
951 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
952 	reset_summary_unit_check(lcu, device, suc_data->reason);
953 
954 	spin_lock_irqsave(&lcu->lock, flags);
955 	_unstop_all_devices_on_lcu(lcu);
956 	_restart_all_base_devices_on_lcu(lcu);
957 	/* 3. read new alias configuration */
958 	_schedule_lcu_update(lcu, device);
959 	lcu->suc_data.device = NULL;
960 	spin_unlock_irqrestore(&lcu->lock, flags);
961 }
962 
963 /*
964  * note: this will be called from int handler context (cdev locked)
965  */
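/*
 * All that is done here is to stop the devices on the lcu, record the
 * reason code and hand the actual recovery over to the worker, since the
 * recovery involves sleeping and must not run in interrupt context.
 */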
966 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
967 					  struct irb *irb)
968 {
969 	struct alias_lcu *lcu;
970 	char reason;
971 	struct dasd_eckd_private *private;
972 	char *sense;
973 
974 	private = (struct dasd_eckd_private *) device->private;
975 
976 	sense = dasd_get_sense(irb);
977 	if (sense) {
978 		reason = sense[8];
979 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
980 			    "eckd handle summary unit check: reason", reason);
981 	} else {
982 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
983 			    "eckd handle summary unit check:"
984 			    " no reason code available");
985 		return;
986 	}
987 
988 	lcu = private->lcu;
989 	if (!lcu) {
990 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
991 			    "device not ready to handle summary"
992 			    " unit check (no lcu structure)");
993 		return;
994 	}
995 	spin_lock(&lcu->lock);
996 	_stop_all_devices_on_lcu(lcu, device);
997 	/* prepare for lcu_update */
998 	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
999 	/* If this device is about to be removed, just return and wait for
1000 	 * the next interrupt on a different device
1001 	 */
1002 	if (list_empty(&device->alias_list)) {
1003 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1004 			    "device is in offline processing,"
1005 			    " don't do summary unit check handling");
1006 		spin_unlock(&lcu->lock);
1007 		return;
1008 	}
1009 	if (lcu->suc_data.device) {
1010 		/* already scheduled or running */
1011 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1012 			    "previous instance of summary unit check worker"
1013 			    " still pending");
1014 		spin_unlock(&lcu->lock);
1015 		return ;
1016 	}
1017 	lcu->suc_data.reason = reason;
1018 	lcu->suc_data.device = device;
1019 	spin_unlock(&lcu->lock);
1020 	schedule_work(&lcu->suc_data.worker);
1021 }
1022