xref: /openbmc/linux/drivers/s390/block/dasd_alias.c (revision 7dd65feb)
1 /*
2  * PAV alias management for the DASD ECKD discipline
3  *
4  * Copyright IBM Corporation, 2007
5  * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "dasd-eckd"
9 
10 #include <linux/list.h>
11 #include <asm/ebcdic.h>
12 #include "dasd_int.h"
13 #include "dasd_eckd.h"
14 
15 #ifdef PRINTK_HEADER
16 #undef PRINTK_HEADER
17 #endif				/* PRINTK_HEADER */
18 #define PRINTK_HEADER "dasd(eckd):"
19 
20 
21 /*
22  * General concept of alias management:
23  * - PAV and DASD alias management is specific to the eckd discipline.
24  * - A device is connected to an lcu as long as the device exists.
25  *   dasd_alias_make_device_known_to_lcu will be called when the
26  *   device is checked by the eckd discipline and
27  *   dasd_alias_disconnect_device_from_lcu will be called
28  *   before the device is deleted.
29  * - The dasd_alias_add_device / dasd_alias_remove_device
30  *   functions mark the point when a device is 'ready for service'.
31  * - A summary unit check is a rare occasion, but it is mandatory to
32  *   support it. It requires some complex recovery actions before the
33  *   devices can be used again (see dasd_alias_handle_summary_unit_check).
34  * - dasd_alias_get_start_dev will find an alias device that can be used
35  *   instead of the base device and does some (very simple) load balancing.
36  *   This is the function that gets called for each I/O, so when improving
37  *   something, this is the function that should get faster or better; the
38  *   rest just has to be correct.
39  */
40 
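/*
 * Rough sketch of the object hierarchy built up in this file (structure
 * and list names as used below, layout simplified):
 *
 *   aliastree (struct alias_root)
 *     serverlist: struct alias_server entries, matched on uid vendor+serial
 *       lculist: struct alias_lcu entries, matched on uid ssid
 *         grouplist: struct alias_pav_group entries, one per base unit
 *                    address (or a single group in HyperPAV mode)
 *           baselist:  base devices (struct dasd_device)
 *           aliaslist: alias devices (struct dasd_device)
 *         active_devices:   devices in service but not (yet) in a group
 *         inactive_devices: devices known to the lcu but not in service
 */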
41 
42 static void summary_unit_check_handling_work(struct work_struct *);
43 static void lcu_update_work(struct work_struct *);
44 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
45 
46 static struct alias_root aliastree = {
47 	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
48 	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
49 };
50 
51 static struct alias_server *_find_server(struct dasd_uid *uid)
52 {
53 	struct alias_server *pos;
54 	list_for_each_entry(pos, &aliastree.serverlist, server) {
55 		if (!strncmp(pos->uid.vendor, uid->vendor,
56 			     sizeof(uid->vendor))
57 		    && !strncmp(pos->uid.serial, uid->serial,
58 				sizeof(uid->serial)))
59 			return pos;
60 	}
61 	return NULL;
62 }
63 
64 static struct alias_lcu *_find_lcu(struct alias_server *server,
65 				   struct dasd_uid *uid)
66 {
67 	struct alias_lcu *pos;
68 	list_for_each_entry(pos, &server->lculist, lcu) {
69 		if (pos->uid.ssid == uid->ssid)
70 			return pos;
71 	}
72 	return NULL;
73 }
74 
75 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
76 					   struct dasd_uid *uid)
77 {
78 	struct alias_pav_group *pos;
79 	__u8 search_unit_addr;
80 
81 	/* for hyper pav there is only one group */
82 	if (lcu->pav == HYPER_PAV) {
83 		if (list_empty(&lcu->grouplist))
84 			return NULL;
85 		else
86 			return list_first_entry(&lcu->grouplist,
87 						struct alias_pav_group, group);
88 	}
89 
90 	/* for base pav we have to find the group that matches the base */
91 	if (uid->type == UA_BASE_DEVICE)
92 		search_unit_addr = uid->real_unit_addr;
93 	else
94 		search_unit_addr = uid->base_unit_addr;
95 	list_for_each_entry(pos, &lcu->grouplist, group) {
96 		if (pos->uid.base_unit_addr == search_unit_addr &&
97 		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
98 			return pos;
99 	}
100 	return NULL;
101 }
102 
103 static struct alias_server *_allocate_server(struct dasd_uid *uid)
104 {
105 	struct alias_server *server;
106 
107 	server = kzalloc(sizeof(*server), GFP_KERNEL);
108 	if (!server)
109 		return ERR_PTR(-ENOMEM);
110 	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
111 	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
112 	INIT_LIST_HEAD(&server->server);
113 	INIT_LIST_HEAD(&server->lculist);
114 	return server;
115 }
116 
117 static void _free_server(struct alias_server *server)
118 {
119 	kfree(server);
120 }
121 
122 static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
123 {
124 	struct alias_lcu *lcu;
125 
126 	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
127 	if (!lcu)
128 		return ERR_PTR(-ENOMEM);
129 	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
130 	if (!lcu->uac)
131 		goto out_err1;
132 	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
133 	if (!lcu->rsu_cqr)
134 		goto out_err2;
135 	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
136 				       GFP_KERNEL | GFP_DMA);
137 	if (!lcu->rsu_cqr->cpaddr)
138 		goto out_err3;
139 	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
140 	if (!lcu->rsu_cqr->data)
141 		goto out_err4;
142 
143 	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
144 	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
145 	lcu->uid.ssid = uid->ssid;
146 	lcu->pav = NO_PAV;
147 	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
148 	INIT_LIST_HEAD(&lcu->lcu);
149 	INIT_LIST_HEAD(&lcu->inactive_devices);
150 	INIT_LIST_HEAD(&lcu->active_devices);
151 	INIT_LIST_HEAD(&lcu->grouplist);
152 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
153 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
154 	spin_lock_init(&lcu->lock);
155 	init_completion(&lcu->lcu_setup);
156 	return lcu;
157 
158 out_err4:
159 	kfree(lcu->rsu_cqr->cpaddr);
160 out_err3:
161 	kfree(lcu->rsu_cqr);
162 out_err2:
163 	kfree(lcu->uac);
164 out_err1:
165 	kfree(lcu);
166 	return ERR_PTR(-ENOMEM);
167 }
168 
169 static void _free_lcu(struct alias_lcu *lcu)
170 {
171 	kfree(lcu->rsu_cqr->data);
172 	kfree(lcu->rsu_cqr->cpaddr);
173 	kfree(lcu->rsu_cqr);
174 	kfree(lcu->uac);
175 	kfree(lcu);
176 }
177 
178 /*
179  * This is the function that will allocate all the server and lcu data,
180  * so this function must be called first for a new device.
181  * If the return value is 1, the lcu was already known before, if it
182  * is 0, this is a new lcu.
183  * Negative return code indicates that something went wrong (e.g. -ENOMEM)
184  */
185 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
186 {
187 	struct dasd_eckd_private *private;
188 	unsigned long flags;
189 	struct alias_server *server, *newserver;
190 	struct alias_lcu *lcu, *newlcu;
191 	int is_lcu_known;
192 	struct dasd_uid *uid;
193 
194 	private = (struct dasd_eckd_private *) device->private;
195 	uid = &private->uid;
196 	spin_lock_irqsave(&aliastree.lock, flags);
197 	is_lcu_known = 1;
198 	server = _find_server(uid);
199 	if (!server) {
200 		spin_unlock_irqrestore(&aliastree.lock, flags);
201 		newserver = _allocate_server(uid);
202 		if (IS_ERR(newserver))
203 			return PTR_ERR(newserver);
204 		spin_lock_irqsave(&aliastree.lock, flags);
205 		server = _find_server(uid);
206 		if (!server) {
207 			list_add(&newserver->server, &aliastree.serverlist);
208 			server = newserver;
209 			is_lcu_known = 0;
210 		} else {
211 			/* someone was faster */
212 			_free_server(newserver);
213 		}
214 	}
215 
216 	lcu = _find_lcu(server, uid);
217 	if (!lcu) {
218 		spin_unlock_irqrestore(&aliastree.lock, flags);
219 		newlcu = _allocate_lcu(uid);
220 		if (IS_ERR(newlcu))
221 			return PTR_ERR(newlcu);
222 		spin_lock_irqsave(&aliastree.lock, flags);
223 		lcu = _find_lcu(server, uid);
224 		if (!lcu) {
225 			list_add(&newlcu->lcu, &server->lculist);
226 			lcu = newlcu;
227 			is_lcu_known = 0;
228 		} else {
229 			/* someone was faster */
230 			_free_lcu(newlcu);
231 		}
232 		is_lcu_known = 0;
233 	}
234 	spin_lock(&lcu->lock);
235 	list_add(&device->alias_list, &lcu->inactive_devices);
236 	private->lcu = lcu;
237 	spin_unlock(&lcu->lock);
238 	spin_unlock_irqrestore(&aliastree.lock, flags);
239 
240 	return is_lcu_known;
241 }
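/*
 * Illustrative use of the return value (a sketch only, not copied from the
 * real caller in dasd_eckd.c; do_first_time_lcu_setup() is a made-up
 * placeholder for the one-time configuration done by the first device):
 *
 *	rc = dasd_alias_make_device_known_to_lcu(device);
 *	if (rc < 0)
 *		return rc;
 *	if (rc == 0) {
 *		do_first_time_lcu_setup(device);
 *		dasd_alias_lcu_setup_complete(device);
 *	} else {
 *		dasd_alias_wait_for_lcu_setup(device);
 *	}
 */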
242 
243 /*
244  * The first device to be registered on an LCU will have to do
245  * some additional setup steps to configure that LCU on the
246  * storage server. All further devices should wait with their
247  * initialization until the first device is done.
248  * To synchronize this work, the first device will call
249  * dasd_alias_lcu_setup_complete when it is done, and all
250  * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
251  */
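/*
 * The synchronization is done via the lcu->lcu_setup completion that is
 * initialized in _allocate_lcu(): dasd_alias_lcu_setup_complete() signals
 * it with complete_all() and dasd_alias_wait_for_lcu_setup() blocks in
 * wait_for_completion() until then.
 */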
252 void dasd_alias_lcu_setup_complete(struct dasd_device *device)
253 {
254 	struct dasd_eckd_private *private;
255 	unsigned long flags;
256 	struct alias_server *server;
257 	struct alias_lcu *lcu;
258 	struct dasd_uid *uid;
259 
260 	private = (struct dasd_eckd_private *) device->private;
261 	uid = &private->uid;
262 	lcu = NULL;
263 	spin_lock_irqsave(&aliastree.lock, flags);
264 	server = _find_server(uid);
265 	if (server)
266 		lcu = _find_lcu(server, uid);
267 	spin_unlock_irqrestore(&aliastree.lock, flags);
268 	if (!lcu) {
269 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
270 				"could not find lcu for %04x %02x",
271 				uid->ssid, uid->real_unit_addr);
272 		WARN_ON(1);
273 		return;
274 	}
275 	complete_all(&lcu->lcu_setup);
276 }
277 
278 void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
279 {
280 	struct dasd_eckd_private *private;
281 	unsigned long flags;
282 	struct alias_server *server;
283 	struct alias_lcu *lcu;
284 	struct dasd_uid *uid;
285 
286 	private = (struct dasd_eckd_private *) device->private;
287 	uid = &private->uid;
288 	lcu = NULL;
289 	spin_lock_irqsave(&aliastree.lock, flags);
290 	server = _find_server(uid);
291 	if (server)
292 		lcu = _find_lcu(server, uid);
293 	spin_unlock_irqrestore(&aliastree.lock, flags);
294 	if (!lcu) {
295 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
296 				"could not find lcu for %04x %02x",
297 				uid->ssid, uid->real_unit_addr);
298 		WARN_ON(1);
299 		return;
300 	}
301 	wait_for_completion(&lcu->lcu_setup);
302 }
303 
304 /*
305  * This function removes a device from the scope of alias management.
306  * The complicated part is to make sure that it is not in use by
307  * any of the workers. If necessary cancel the work.
308  */
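/*
 * Note on the locking below: cancel_work_sync() and
 * cancel_delayed_work_sync() may sleep and the workers themselves take
 * lcu->lock, so the lock is dropped around the cancel calls and the
 * suc_data.device / ruac_data.device pointers are checked again before
 * they are cleared.
 */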
309 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
310 {
311 	struct dasd_eckd_private *private;
312 	unsigned long flags;
313 	struct alias_lcu *lcu;
314 	struct alias_server *server;
315 	int was_pending;
316 
317 	private = (struct dasd_eckd_private *) device->private;
318 	lcu = private->lcu;
319 	spin_lock_irqsave(&lcu->lock, flags);
320 	list_del_init(&device->alias_list);
321 	/* make sure that the workers don't use this device */
322 	if (device == lcu->suc_data.device) {
323 		spin_unlock_irqrestore(&lcu->lock, flags);
324 		cancel_work_sync(&lcu->suc_data.worker);
325 		spin_lock_irqsave(&lcu->lock, flags);
326 		if (device == lcu->suc_data.device)
327 			lcu->suc_data.device = NULL;
328 	}
329 	was_pending = 0;
330 	if (device == lcu->ruac_data.device) {
331 		spin_unlock_irqrestore(&lcu->lock, flags);
332 		was_pending = 1;
333 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
334 		spin_lock_irqsave(&lcu->lock, flags);
335 		if (device == lcu->ruac_data.device)
336 			lcu->ruac_data.device = NULL;
337 	}
338 	private->lcu = NULL;
339 	spin_unlock_irqrestore(&lcu->lock, flags);
340 
341 	spin_lock_irqsave(&aliastree.lock, flags);
342 	spin_lock(&lcu->lock);
343 	if (list_empty(&lcu->grouplist) &&
344 	    list_empty(&lcu->active_devices) &&
345 	    list_empty(&lcu->inactive_devices)) {
346 		list_del(&lcu->lcu);
347 		spin_unlock(&lcu->lock);
348 		_free_lcu(lcu);
349 		lcu = NULL;
350 	} else {
351 		if (was_pending)
352 			_schedule_lcu_update(lcu, NULL);
353 		spin_unlock(&lcu->lock);
354 	}
355 	server = _find_server(&private->uid);
356 	if (server && list_empty(&server->lculist)) {
357 		list_del(&server->server);
358 		_free_server(server);
359 	}
360 	spin_unlock_irqrestore(&aliastree.lock, flags);
361 }
362 
363 /*
364  * This function assumes that the unit address configuration stored
365  * in the lcu is up to date and will update the device uid before
366  * adding it to a pav group.
367  */
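/*
 * Both callers (_lcu_update and dasd_alias_add_device) hold lcu->lock,
 * which is why the pav group below is allocated with GFP_ATOMIC.
 */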
368 static int _add_device_to_lcu(struct alias_lcu *lcu,
369 			      struct dasd_device *device)
370 {
371 
372 	struct dasd_eckd_private *private;
373 	struct alias_pav_group *group;
374 	struct dasd_uid *uid;
375 
376 	private = (struct dasd_eckd_private *) device->private;
377 	uid = &private->uid;
378 	uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
379 	uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
380 	dasd_set_uid(device->cdev, &private->uid);
381 
382 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
383 	if (lcu->pav == NO_PAV) {
384 		list_move(&device->alias_list, &lcu->active_devices);
385 		return 0;
386 	}
387 
388 	group = _find_group(lcu, uid);
389 	if (!group) {
390 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
391 		if (!group)
392 			return -ENOMEM;
393 		memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
394 		memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
395 		group->uid.ssid = uid->ssid;
396 		if (uid->type == UA_BASE_DEVICE)
397 			group->uid.base_unit_addr = uid->real_unit_addr;
398 		else
399 			group->uid.base_unit_addr = uid->base_unit_addr;
400 		memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
401 		INIT_LIST_HEAD(&group->group);
402 		INIT_LIST_HEAD(&group->baselist);
403 		INIT_LIST_HEAD(&group->aliaslist);
404 		list_add(&group->group, &lcu->grouplist);
405 	}
406 	if (uid->type == UA_BASE_DEVICE)
407 		list_move(&device->alias_list, &group->baselist);
408 	else
409 		list_move(&device->alias_list, &group->aliaslist);
410 	private->pavgroup = group;
411 	return 0;
412 }
413 
414 static void _remove_device_from_lcu(struct alias_lcu *lcu,
415 				    struct dasd_device *device)
416 {
417 	struct dasd_eckd_private *private;
418 	struct alias_pav_group *group;
419 
420 	private = (struct dasd_eckd_private *) device->private;
421 	list_move(&device->alias_list, &lcu->inactive_devices);
422 	group = private->pavgroup;
423 	if (!group)
424 		return;
425 	private->pavgroup = NULL;
426 	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
427 		list_del(&group->group);
428 		kfree(group);
429 		return;
430 	}
431 	if (group->next == device)
432 		group->next = NULL;
433 }
434 
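/*
 * Read the unit address configuration of the lcu from the storage server
 * into lcu->uac. This builds a two-CCW channel program: a Perform
 * Subsystem Function CCW carrying the PRSSD order with suborder 0x0e,
 * command-chained (CCW_FLAG_CC) to a Read Subsystem Data CCW that
 * transfers the data.
 */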
435 static int read_unit_address_configuration(struct dasd_device *device,
436 					   struct alias_lcu *lcu)
437 {
438 	struct dasd_psf_prssd_data *prssdp;
439 	struct dasd_ccw_req *cqr;
440 	struct ccw1 *ccw;
441 	int rc;
442 	unsigned long flags;
443 
444 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
445 				   (sizeof(struct dasd_psf_prssd_data)),
446 				   device);
447 	if (IS_ERR(cqr))
448 		return PTR_ERR(cqr);
449 	cqr->startdev = device;
450 	cqr->memdev = device;
451 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
452 	cqr->retries = 10;
453 	cqr->expires = 20 * HZ;
454 
455 	/* Prepare for Read Subsystem Data */
456 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
457 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
458 	prssdp->order = PSF_ORDER_PRSSD;
459 	prssdp->suborder = 0x0e;	/* Read unit address configuration */
460 	/* all other bytes of prssdp must be zero */
461 
462 	ccw = cqr->cpaddr;
463 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
464 	ccw->count = sizeof(struct dasd_psf_prssd_data);
465 	ccw->flags |= CCW_FLAG_CC;
466 	ccw->cda = (__u32)(addr_t) prssdp;
467 
468 	/* Read Subsystem Data - unit address configuration */
469 	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
470 
471 	ccw++;
472 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
473 	ccw->count = sizeof(*(lcu->uac));
474 	ccw->cda = (__u32)(addr_t) lcu->uac;
475 
476 	cqr->buildclk = get_clock();
477 	cqr->status = DASD_CQR_FILLED;
478 
479 	/* need to unset flag here to detect race with summary unit check */
480 	spin_lock_irqsave(&lcu->lock, flags);
481 	lcu->flags &= ~NEED_UAC_UPDATE;
482 	spin_unlock_irqrestore(&lcu->lock, flags);
483 
484 	do {
485 		rc = dasd_sleep_on(cqr);
486 	} while (rc && (cqr->retries > 0));
487 	if (rc) {
488 		spin_lock_irqsave(&lcu->lock, flags);
489 		lcu->flags |= NEED_UAC_UPDATE;
490 		spin_unlock_irqrestore(&lcu->lock, flags);
491 	}
492 	dasd_kfree_request(cqr, cqr->memdev);
493 	return rc;
494 }
495 
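/*
 * Re-read the unit address configuration for the whole lcu: dissolve all
 * existing pav groups, read the configuration via the reference device,
 * derive the pav mode (NO_PAV / BASE_PAV / HYPER_PAV) from it and finally
 * sort all active devices back into groups with _add_device_to_lcu.
 */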
496 static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
497 {
498 	unsigned long flags;
499 	struct alias_pav_group *pavgroup, *tempgroup;
500 	struct dasd_device *device, *tempdev;
501 	int i, rc;
502 	struct dasd_eckd_private *private;
503 
504 	spin_lock_irqsave(&lcu->lock, flags);
505 	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
506 		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
507 					 alias_list) {
508 			list_move(&device->alias_list, &lcu->active_devices);
509 			private = (struct dasd_eckd_private *) device->private;
510 			private->pavgroup = NULL;
511 		}
512 		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
513 					 alias_list) {
514 			list_move(&device->alias_list, &lcu->active_devices);
515 			private = (struct dasd_eckd_private *) device->private;
516 			private->pavgroup = NULL;
517 		}
518 		list_del(&pavgroup->group);
519 		kfree(pavgroup);
520 	}
521 	spin_unlock_irqrestore(&lcu->lock, flags);
522 
523 	rc = read_unit_address_configuration(refdev, lcu);
524 	if (rc)
525 		return rc;
526 
527 	spin_lock_irqsave(&lcu->lock, flags);
528 	lcu->pav = NO_PAV;
529 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
530 		switch (lcu->uac->unit[i].ua_type) {
531 		case UA_BASE_PAV_ALIAS:
532 			lcu->pav = BASE_PAV;
533 			break;
534 		case UA_HYPER_PAV_ALIAS:
535 			lcu->pav = HYPER_PAV;
536 			break;
537 		}
538 		if (lcu->pav != NO_PAV)
539 			break;
540 	}
541 
542 	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
543 				 alias_list) {
544 		_add_device_to_lcu(lcu, device);
545 	}
546 	spin_unlock_irqrestore(&lcu->lock, flags);
547 	return 0;
548 }
549 
550 static void lcu_update_work(struct work_struct *work)
551 {
552 	struct alias_lcu *lcu;
553 	struct read_uac_work_data *ruac_data;
554 	struct dasd_device *device;
555 	unsigned long flags;
556 	int rc;
557 
558 	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
559 	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
560 	device = ruac_data->device;
561 	rc = _lcu_update(device, lcu);
562 	/*
563 	 * Need to check flags again, as there could have been another
564 	 * prepare_update or a new device while we were still
565 	 * processing the data
566 	 */
567 	spin_lock_irqsave(&lcu->lock, flags);
568 	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
569 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
570 			    " alias data in lcu (rc = %d), retry later", rc);
571 		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
572 	} else {
573 		lcu->ruac_data.device = NULL;
574 		lcu->flags &= ~UPDATE_PENDING;
575 	}
576 	spin_unlock_irqrestore(&lcu->lock, flags);
577 }
578 
579 static int _schedule_lcu_update(struct alias_lcu *lcu,
580 				struct dasd_device *device)
581 {
582 	struct dasd_device *usedev = NULL;
583 	struct alias_pav_group *group;
584 
585 	lcu->flags |= NEED_UAC_UPDATE;
586 	if (lcu->ruac_data.device) {
587 		/* already scheduled or running */
588 		return 0;
589 	}
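	/*
	 * Pick a device to perform the update with: prefer the device that
	 * triggered the update if it is still connected, then any device
	 * from a pav group, then any device from the active list.
	 */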
590 	if (device && !list_empty(&device->alias_list))
591 		usedev = device;
592 
593 	if (!usedev && !list_empty(&lcu->grouplist)) {
594 		group = list_first_entry(&lcu->grouplist,
595 					 struct alias_pav_group, group);
596 		if (!list_empty(&group->baselist))
597 			usedev = list_first_entry(&group->baselist,
598 						  struct dasd_device,
599 						  alias_list);
600 		else if (!list_empty(&group->aliaslist))
601 			usedev = list_first_entry(&group->aliaslist,
602 						  struct dasd_device,
603 						  alias_list);
604 	}
605 	if (!usedev && !list_empty(&lcu->active_devices)) {
606 		usedev = list_first_entry(&lcu->active_devices,
607 					  struct dasd_device, alias_list);
608 	}
609 	/*
610 	 * if we haven't found a proper device yet, give up for now; the next
611 	 * device to be set active will trigger an lcu update
612 	 */
613 	if (!usedev)
614 		return -EINVAL;
615 	lcu->ruac_data.device = usedev;
616 	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
617 	return 0;
618 }
619 
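/*
 * Mark the device as 'ready for service': sort it into its pav group (or
 * onto the active list) based on the current unit address configuration.
 * If that configuration is stale or the grouping fails, the device is
 * parked on the active list and an lcu update is scheduled instead.
 */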
620 int dasd_alias_add_device(struct dasd_device *device)
621 {
622 	struct dasd_eckd_private *private;
623 	struct alias_lcu *lcu;
624 	unsigned long flags;
625 	int rc;
626 
627 	private = (struct dasd_eckd_private *) device->private;
628 	lcu = private->lcu;
629 	rc = 0;
630 	spin_lock_irqsave(&lcu->lock, flags);
631 	if (!(lcu->flags & UPDATE_PENDING)) {
632 		rc = _add_device_to_lcu(lcu, device);
633 		if (rc)
634 			lcu->flags |= UPDATE_PENDING;
635 	}
636 	if (lcu->flags & UPDATE_PENDING) {
637 		list_move(&device->alias_list, &lcu->active_devices);
638 		_schedule_lcu_update(lcu, device);
639 	}
640 	spin_unlock_irqrestore(&lcu->lock, flags);
641 	return rc;
642 }
643 
644 int dasd_alias_remove_device(struct dasd_device *device)
645 {
646 	struct dasd_eckd_private *private;
647 	struct alias_lcu *lcu;
648 	unsigned long flags;
649 
650 	private = (struct dasd_eckd_private *) device->private;
651 	lcu = private->lcu;
652 	spin_lock_irqsave(&lcu->lock, flags);
653 	_remove_device_from_lcu(lcu, device);
654 	spin_unlock_irqrestore(&lcu->lock, flags);
655 	return 0;
656 }
657 
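/*
 * Find an alias device that a request for base_device can be started on.
 * The aliases of the pav group are handed out round-robin via group->next;
 * the chosen alias is only returned if it currently has a lower load
 * counter (alias_priv->count) than the base device and is not stopped,
 * otherwise NULL is returned and the caller uses the base device itself.
 */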
658 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
659 {
660 
661 	struct dasd_device *alias_device;
662 	struct alias_pav_group *group;
663 	struct alias_lcu *lcu;
664 	struct dasd_eckd_private *private, *alias_priv;
665 	unsigned long flags;
666 
667 	private = (struct dasd_eckd_private *) base_device->private;
668 	group = private->pavgroup;
669 	lcu = private->lcu;
670 	if (!group || !lcu)
671 		return NULL;
672 	if (lcu->pav == NO_PAV ||
673 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
674 		return NULL;
675 
676 	spin_lock_irqsave(&lcu->lock, flags);
677 	alias_device = group->next;
678 	if (!alias_device) {
679 		if (list_empty(&group->aliaslist)) {
680 			spin_unlock_irqrestore(&lcu->lock, flags);
681 			return NULL;
682 		} else {
683 			alias_device = list_first_entry(&group->aliaslist,
684 							struct dasd_device,
685 							alias_list);
686 		}
687 	}
688 	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
689 		group->next = list_first_entry(&group->aliaslist,
690 					       struct dasd_device, alias_list);
691 	else
692 		group->next = list_first_entry(&alias_device->alias_list,
693 					       struct dasd_device, alias_list);
694 	spin_unlock_irqrestore(&lcu->lock, flags);
695 	alias_priv = (struct dasd_eckd_private *) alias_device->private;
696 	if ((alias_priv->count < private->count) && !alias_device->stopped)
697 		return alias_device;
698 	else
699 		return NULL;
700 }
701 
702 /*
703  * Summary unit check handling depends on the way alias devices
704  * are handled so it is done here rather than in dasd_eckd.c
705  */
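/*
 * Overall flow: dasd_alias_handle_summary_unit_check() runs in interrupt
 * context, stops all devices on the lcu and schedules
 * summary_unit_check_handling_work(). The worker then flushes the alias
 * devices, issues a Reset Summary Unit Check CCW (DASD_ECKD_CCW_RSCK) with
 * the reason code taken from the sense data, unstops the devices, restarts
 * the base devices and schedules an lcu update to re-read the alias
 * configuration.
 */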
706 static int reset_summary_unit_check(struct alias_lcu *lcu,
707 				    struct dasd_device *device,
708 				    char reason)
709 {
710 	struct dasd_ccw_req *cqr;
711 	int rc = 0;
712 	struct ccw1 *ccw;
713 
714 	cqr = lcu->rsu_cqr;
715 	strncpy((char *) &cqr->magic, "ECKD", 4);
716 	ASCEBC((char *) &cqr->magic, 4);
717 	ccw = cqr->cpaddr;
718 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
719 	ccw->flags = 0 ;
720 	ccw->count = 16;
721 	ccw->cda = (__u32)(addr_t) cqr->data;
722 	((char *)cqr->data)[0] = reason;
723 
724 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
725 	cqr->retries = 255;	/* set retry counter to enable basic ERP */
726 	cqr->startdev = device;
727 	cqr->memdev = device;
728 	cqr->block = NULL;
729 	cqr->expires = 5 * HZ;
730 	cqr->buildclk = get_clock();
731 	cqr->status = DASD_CQR_FILLED;
732 
733 	rc = dasd_sleep_on_immediatly(cqr);
734 	return rc;
735 }
736 
737 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
738 {
739 	struct alias_pav_group *pavgroup;
740 	struct dasd_device *device;
741 	struct dasd_eckd_private *private;
742 
743 	/* active and inactive lists can contain alias as well as base devices */
744 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
745 		private = (struct dasd_eckd_private *) device->private;
746 		if (private->uid.type != UA_BASE_DEVICE)
747 			continue;
748 		dasd_schedule_block_bh(device->block);
749 		dasd_schedule_device_bh(device);
750 	}
751 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
752 		private = (struct dasd_eckd_private *) device->private;
753 		if (private->uid.type != UA_BASE_DEVICE)
754 			continue;
755 		dasd_schedule_block_bh(device->block);
756 		dasd_schedule_device_bh(device);
757 	}
758 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
759 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
760 			dasd_schedule_block_bh(device->block);
761 			dasd_schedule_device_bh(device);
762 		}
763 	}
764 }
765 
766 static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
767 {
768 	struct alias_pav_group *pavgroup;
769 	struct dasd_device *device, *temp;
770 	struct dasd_eckd_private *private;
771 	int rc;
772 	unsigned long flags;
773 	LIST_HEAD(active);
774 
775 	/*
776 	 * Problem here is that dasd_flush_device_queue may wait
777 	 * for a request to terminate. We can't keep
778 	 * the lcu lock during that time, so we must assume that
779 	 * the lists may have changed.
780 	 * Idea: first gather all active alias devices in a separate list,
781 	 * then flush the first element of this list unlocked, and afterwards
782 	 * check if it is still on the list before moving it to the
783 	 * active_devices list.
784 	 */
785 
786 	spin_lock_irqsave(&lcu->lock, flags);
787 	list_for_each_entry_safe(device, temp, &lcu->active_devices,
788 				 alias_list) {
789 		private = (struct dasd_eckd_private *) device->private;
790 		if (private->uid.type == UA_BASE_DEVICE)
791 			continue;
792 		list_move(&device->alias_list, &active);
793 	}
794 
795 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
796 		list_splice_init(&pavgroup->aliaslist, &active);
797 	}
798 	while (!list_empty(&active)) {
799 		device = list_first_entry(&active, struct dasd_device,
800 					  alias_list);
801 		spin_unlock_irqrestore(&lcu->lock, flags);
802 		rc = dasd_flush_device_queue(device);
803 		spin_lock_irqsave(&lcu->lock, flags);
804 		/*
805 		 * only move device around if it wasn't moved away while we
806 		 * were waiting for the flush
807 		 */
808 		if (device == list_first_entry(&active,
809 					       struct dasd_device, alias_list))
810 			list_move(&device->alias_list, &lcu->active_devices);
811 	}
812 	spin_unlock_irqrestore(&lcu->lock, flags);
813 }
814 
815 static void __stop_device_on_lcu(struct dasd_device *device,
816 				 struct dasd_device *pos)
817 {
818 	/* If pos == device then device is already locked! */
819 	if (pos == device) {
820 		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
821 		return;
822 	}
823 	spin_lock(get_ccwdev_lock(pos->cdev));
824 	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
825 	spin_unlock(get_ccwdev_lock(pos->cdev));
826 }
827 
828 /*
829  * This function is called in interrupt context, so the
830  * cdev lock for device is already locked!
831  */
832 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
833 				     struct dasd_device *device)
834 {
835 	struct alias_pav_group *pavgroup;
836 	struct dasd_device *pos;
837 
838 	list_for_each_entry(pos, &lcu->active_devices, alias_list)
839 		__stop_device_on_lcu(device, pos);
840 	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
841 		__stop_device_on_lcu(device, pos);
842 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
843 		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
844 			__stop_device_on_lcu(device, pos);
845 		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
846 			__stop_device_on_lcu(device, pos);
847 	}
848 }
849 
850 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
851 {
852 	struct alias_pav_group *pavgroup;
853 	struct dasd_device *device;
854 	unsigned long flags;
855 
856 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
857 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
858 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
859 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
860 	}
861 
862 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
863 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
864 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
865 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
866 	}
867 
868 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
869 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
870 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
871 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
872 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
873 					       flags);
874 		}
875 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
876 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
877 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
878 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
879 					       flags);
880 		}
881 	}
882 }
883 
884 static void summary_unit_check_handling_work(struct work_struct *work)
885 {
886 	struct alias_lcu *lcu;
887 	struct summary_unit_check_work_data *suc_data;
888 	unsigned long flags;
889 	struct dasd_device *device;
890 
891 	suc_data = container_of(work, struct summary_unit_check_work_data,
892 				worker);
893 	lcu = container_of(suc_data, struct alias_lcu, suc_data);
894 	device = suc_data->device;
895 
896 	/* 1. flush alias devices */
897 	flush_all_alias_devices_on_lcu(lcu);
898 
899 	/* 2. reset summary unit check */
900 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
901 	dasd_device_remove_stop_bits(device,
902 				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
903 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
904 	reset_summary_unit_check(lcu, device, suc_data->reason);
905 
906 	spin_lock_irqsave(&lcu->lock, flags);
907 	_unstop_all_devices_on_lcu(lcu);
908 	_restart_all_base_devices_on_lcu(lcu);
909 	/* 3. read new alias configuration */
910 	_schedule_lcu_update(lcu, device);
911 	lcu->suc_data.device = NULL;
912 	spin_unlock_irqrestore(&lcu->lock, flags);
913 }
914 
915 /*
916  * note: this will be called from int handler context (cdev locked)
917  */
918 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
919 					  struct irb *irb)
920 {
921 	struct alias_lcu *lcu;
922 	char reason;
923 	struct dasd_eckd_private *private;
924 	char *sense;
925 
926 	private = (struct dasd_eckd_private *) device->private;
927 
928 	sense = dasd_get_sense(irb);
929 	if (sense) {
930 		reason = sense[8];
931 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
932 			    "eckd handle summary unit check: reason", reason);
933 	} else {
934 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
935 			    "eckd handle summary unit check:"
936 			    " no reason code available");
937 		return;
938 	}
939 
940 	lcu = private->lcu;
941 	if (!lcu) {
942 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
943 			    "device not ready to handle summary"
944 			    " unit check (no lcu structure)");
945 		return;
946 	}
947 	spin_lock(&lcu->lock);
948 	_stop_all_devices_on_lcu(lcu, device);
949 	/* prepare for lcu_update */
950 	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
951 	/* If this device is about to be removed just return and wait for
952 	 * the next interrupt on a different device
953 	 */
954 	if (list_empty(&device->alias_list)) {
955 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
956 			    "device is in offline processing,"
957 			    " don't do summary unit check handling");
958 		spin_unlock(&lcu->lock);
959 		return;
960 	}
961 	if (lcu->suc_data.device) {
962 		/* already scheduled or running */
963 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
964 			    "previous instance of summary unit check worker"
965 			    " still pending");
966 		spin_unlock(&lcu->lock);
967 		return ;
968 	}
969 	lcu->suc_data.reason = reason;
970 	lcu->suc_data.device = device;
971 	spin_unlock(&lcu->lock);
972 	schedule_work(&lcu->suc_data.worker);
973 }
974