Lines matching refs: device

88 	struct dasd_device *device;  in dasd_alloc_device()  local
90 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); in dasd_alloc_device()
91 if (!device) in dasd_alloc_device()
95 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); in dasd_alloc_device()
96 if (!device->ccw_mem) { in dasd_alloc_device()
97 kfree(device); in dasd_alloc_device()
101 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); in dasd_alloc_device()
102 if (!device->erp_mem) { in dasd_alloc_device()
103 free_pages((unsigned long) device->ccw_mem, 1); in dasd_alloc_device()
104 kfree(device); in dasd_alloc_device()
108 device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); in dasd_alloc_device()
109 if (!device->ese_mem) { in dasd_alloc_device()
110 free_page((unsigned long) device->erp_mem); in dasd_alloc_device()
111 free_pages((unsigned long) device->ccw_mem, 1); in dasd_alloc_device()
112 kfree(device); in dasd_alloc_device()
116 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); in dasd_alloc_device()
117 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); in dasd_alloc_device()
118 dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); in dasd_alloc_device()
119 spin_lock_init(&device->mem_lock); in dasd_alloc_device()
120 atomic_set(&device->tasklet_scheduled, 0); in dasd_alloc_device()
121 tasklet_init(&device->tasklet, dasd_device_tasklet, in dasd_alloc_device()
122 (unsigned long) device); in dasd_alloc_device()
123 INIT_LIST_HEAD(&device->ccw_queue); in dasd_alloc_device()
124 timer_setup(&device->timer, dasd_device_timeout, 0); in dasd_alloc_device()
125 INIT_WORK(&device->kick_work, do_kick_device); in dasd_alloc_device()
126 INIT_WORK(&device->reload_device, do_reload_device); in dasd_alloc_device()
127 INIT_WORK(&device->requeue_requests, do_requeue_requests); in dasd_alloc_device()
128 device->state = DASD_STATE_NEW; in dasd_alloc_device()
129 device->target = DASD_STATE_NEW; in dasd_alloc_device()
130 mutex_init(&device->state_mutex); in dasd_alloc_device()
131 spin_lock_init(&device->profile.lock); in dasd_alloc_device()
132 return device; in dasd_alloc_device()
138 void dasd_free_device(struct dasd_device *device) in dasd_free_device() argument
140 kfree(device->private); in dasd_free_device()
141 free_pages((unsigned long) device->ese_mem, 1); in dasd_free_device()
142 free_page((unsigned long) device->erp_mem); in dasd_free_device()
143 free_pages((unsigned long) device->ccw_mem, 1); in dasd_free_device()
144 kfree(device); in dasd_free_device()
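
The listing above shows dasd_alloc_device() building the device piece by piece (kzalloc for the structure, DMA-capable pages for the ccw, erp and ese pools, then chunk lists, locks, tasklet, timer and work items) and unwinding in reverse order on every failure path, while dasd_free_device() releases the same resources in the opposite order of allocation. A minimal standalone sketch of that staged-allocation-with-unwind pattern, using a hypothetical struct foo rather than the real dasd_device:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical example structure; not part of the dasd driver itself. */
struct foo {
	void *ccw_mem;	/* two DMA-capable pages (order 1) */
	void *erp_mem;	/* one zeroed DMA-capable page */
};

static struct foo *foo_alloc(void)
{
	struct foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_ATOMIC);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->ccw_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!foo->ccw_mem)
		goto out_free_foo;

	foo->erp_mem = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!foo->erp_mem)
		goto out_free_ccw;

	return foo;

out_free_ccw:
	free_pages((unsigned long)foo->ccw_mem, 1);
out_free_foo:
	kfree(foo);
	return ERR_PTR(-ENOMEM);
}

static void foo_free(struct foo *foo)
{
	/* Release in reverse order of allocation, as dasd_free_device() does. */
	free_page((unsigned long)foo->erp_mem);
	free_pages((unsigned long)foo->ccw_mem, 1);
	kfree(foo);
}
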
186 static int dasd_state_new_to_known(struct dasd_device *device) in dasd_state_new_to_known() argument
192 dasd_get_device(device); in dasd_state_new_to_known()
193 device->state = DASD_STATE_KNOWN; in dasd_state_new_to_known()
200 static int dasd_state_known_to_new(struct dasd_device *device) in dasd_state_known_to_new() argument
203 dasd_eer_disable(device); in dasd_state_known_to_new()
204 device->state = DASD_STATE_NEW; in dasd_state_known_to_new()
207 dasd_put_device(device); in dasd_state_known_to_new()
227 static int dasd_state_known_to_basic(struct dasd_device *device) in dasd_state_known_to_basic() argument
229 struct dasd_block *block = device->block; in dasd_state_known_to_basic()
242 dasd_profile_on(&device->block->profile); in dasd_state_known_to_basic()
244 device->debugfs_dentry = in dasd_state_known_to_basic()
245 dasd_debugfs_setup(dev_name(&device->cdev->dev), in dasd_state_known_to_basic()
247 dasd_profile_init(&device->profile, device->debugfs_dentry); in dasd_state_known_to_basic()
248 dasd_hosts_init(device->debugfs_dentry, device); in dasd_state_known_to_basic()
251 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, in dasd_state_known_to_basic()
253 debug_register_view(device->debug_area, &debug_sprintf_view); in dasd_state_known_to_basic()
254 debug_set_level(device->debug_area, DBF_WARNING); in dasd_state_known_to_basic()
255 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); in dasd_state_known_to_basic()
257 device->state = DASD_STATE_BASIC; in dasd_state_known_to_basic()
265 static int dasd_state_basic_to_known(struct dasd_device *device) in dasd_state_basic_to_known() argument
269 if (device->discipline->basic_to_known) { in dasd_state_basic_to_known()
270 rc = device->discipline->basic_to_known(device); in dasd_state_basic_to_known()
275 if (device->block) { in dasd_state_basic_to_known()
276 dasd_profile_exit(&device->block->profile); in dasd_state_basic_to_known()
277 debugfs_remove(device->block->debugfs_dentry); in dasd_state_basic_to_known()
278 dasd_gendisk_free(device->block); in dasd_state_basic_to_known()
279 dasd_block_clear_timer(device->block); in dasd_state_basic_to_known()
281 rc = dasd_flush_device_queue(device); in dasd_state_basic_to_known()
284 dasd_device_clear_timer(device); in dasd_state_basic_to_known()
285 dasd_profile_exit(&device->profile); in dasd_state_basic_to_known()
286 dasd_hosts_exit(device); in dasd_state_basic_to_known()
287 debugfs_remove(device->debugfs_dentry); in dasd_state_basic_to_known()
288 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); in dasd_state_basic_to_known()
289 if (device->debug_area != NULL) { in dasd_state_basic_to_known()
290 debug_unregister(device->debug_area); in dasd_state_basic_to_known()
291 device->debug_area = NULL; in dasd_state_basic_to_known()
293 device->state = DASD_STATE_KNOWN; in dasd_state_basic_to_known()
311 static int dasd_state_basic_to_ready(struct dasd_device *device) in dasd_state_basic_to_ready() argument
318 block = device->block; in dasd_state_basic_to_ready()
325 device->state = DASD_STATE_UNFMT; in dasd_state_basic_to_ready()
326 disk = device->block->gdp; in dasd_state_basic_to_ready()
333 if (device->discipline->setup_blk_queue) in dasd_state_basic_to_ready()
334 device->discipline->setup_blk_queue(block); in dasd_state_basic_to_ready()
337 device->state = DASD_STATE_READY; in dasd_state_basic_to_ready()
340 device->state = DASD_STATE_BASIC; in dasd_state_basic_to_ready()
344 device->state = DASD_STATE_READY; in dasd_state_basic_to_ready()
347 if (device->discipline->basic_to_ready) in dasd_state_basic_to_ready()
348 rc = device->discipline->basic_to_ready(device); in dasd_state_basic_to_ready()
353 int _wait_for_empty_queues(struct dasd_device *device) in _wait_for_empty_queues() argument
355 if (device->block) in _wait_for_empty_queues()
356 return list_empty(&device->ccw_queue) && in _wait_for_empty_queues()
357 list_empty(&device->block->ccw_queue); in _wait_for_empty_queues()
359 return list_empty(&device->ccw_queue); in _wait_for_empty_queues()
367 static int dasd_state_ready_to_basic(struct dasd_device *device) in dasd_state_ready_to_basic() argument
371 device->state = DASD_STATE_BASIC; in dasd_state_ready_to_basic()
372 if (device->block) { in dasd_state_ready_to_basic()
373 struct dasd_block *block = device->block; in dasd_state_ready_to_basic()
376 device->state = DASD_STATE_READY; in dasd_state_ready_to_basic()
390 static int dasd_state_unfmt_to_basic(struct dasd_device *device) in dasd_state_unfmt_to_basic() argument
392 device->state = DASD_STATE_BASIC; in dasd_state_unfmt_to_basic()
402 dasd_state_ready_to_online(struct dasd_device * device) in dasd_state_ready_to_online() argument
404 device->state = DASD_STATE_ONLINE; in dasd_state_ready_to_online()
405 if (device->block) { in dasd_state_ready_to_online()
406 dasd_schedule_block_bh(device->block); in dasd_state_ready_to_online()
407 if ((device->features & DASD_FEATURE_USERAW)) { in dasd_state_ready_to_online()
408 kobject_uevent(&disk_to_dev(device->block->gdp)->kobj, in dasd_state_ready_to_online()
412 disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); in dasd_state_ready_to_online()
420 static int dasd_state_online_to_ready(struct dasd_device *device) in dasd_state_online_to_ready() argument
424 if (device->discipline->online_to_ready) { in dasd_state_online_to_ready()
425 rc = device->discipline->online_to_ready(device); in dasd_state_online_to_ready()
430 device->state = DASD_STATE_READY; in dasd_state_online_to_ready()
431 if (device->block && !(device->features & DASD_FEATURE_USERAW)) in dasd_state_online_to_ready()
432 disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); in dasd_state_online_to_ready()
439 static int dasd_increase_state(struct dasd_device *device) in dasd_increase_state() argument
444 if (device->state == DASD_STATE_NEW && in dasd_increase_state()
445 device->target >= DASD_STATE_KNOWN) in dasd_increase_state()
446 rc = dasd_state_new_to_known(device); in dasd_increase_state()
449 device->state == DASD_STATE_KNOWN && in dasd_increase_state()
450 device->target >= DASD_STATE_BASIC) in dasd_increase_state()
451 rc = dasd_state_known_to_basic(device); in dasd_increase_state()
454 device->state == DASD_STATE_BASIC && in dasd_increase_state()
455 device->target >= DASD_STATE_READY) in dasd_increase_state()
456 rc = dasd_state_basic_to_ready(device); in dasd_increase_state()
459 device->state == DASD_STATE_UNFMT && in dasd_increase_state()
460 device->target > DASD_STATE_UNFMT) in dasd_increase_state()
464 device->state == DASD_STATE_READY && in dasd_increase_state()
465 device->target >= DASD_STATE_ONLINE) in dasd_increase_state()
466 rc = dasd_state_ready_to_online(device); in dasd_increase_state()
474 static int dasd_decrease_state(struct dasd_device *device) in dasd_decrease_state() argument
479 if (device->state == DASD_STATE_ONLINE && in dasd_decrease_state()
480 device->target <= DASD_STATE_READY) in dasd_decrease_state()
481 rc = dasd_state_online_to_ready(device); in dasd_decrease_state()
484 device->state == DASD_STATE_READY && in dasd_decrease_state()
485 device->target <= DASD_STATE_BASIC) in dasd_decrease_state()
486 rc = dasd_state_ready_to_basic(device); in dasd_decrease_state()
489 device->state == DASD_STATE_UNFMT && in dasd_decrease_state()
490 device->target <= DASD_STATE_BASIC) in dasd_decrease_state()
491 rc = dasd_state_unfmt_to_basic(device); in dasd_decrease_state()
494 device->state == DASD_STATE_BASIC && in dasd_decrease_state()
495 device->target <= DASD_STATE_KNOWN) in dasd_decrease_state()
496 rc = dasd_state_basic_to_known(device); in dasd_decrease_state()
499 device->state == DASD_STATE_KNOWN && in dasd_decrease_state()
500 device->target <= DASD_STATE_NEW) in dasd_decrease_state()
501 rc = dasd_state_known_to_new(device); in dasd_decrease_state()
509 static void dasd_change_state(struct dasd_device *device) in dasd_change_state() argument
513 if (device->state == device->target) in dasd_change_state()
516 if (device->state < device->target) in dasd_change_state()
517 rc = dasd_increase_state(device); in dasd_change_state()
519 rc = dasd_decrease_state(device); in dasd_change_state()
523 device->target = device->state; in dasd_change_state()
526 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); in dasd_change_state()
528 if (device->state == device->target) in dasd_change_state()
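
The state handling listed above steps the device toward device->target one transition at a time: dasd_change_state() picks the direction, dasd_increase_state()/dasd_decrease_state() walk a ladder of "if at state X and target is at or beyond Y, run the X-to-Y transition" checks, and a failing step pins the target back to the current state. A compact sketch of that ladder with made-up states and trivial transition stubs (the real driver has more states, e.g. UNFMT, and real work in each transition):

/* Hypothetical states and transitions; illustrative only. */
enum foo_state { FOO_NEW, FOO_KNOWN, FOO_BASIC, FOO_READY, FOO_ONLINE };

struct foo_dev {
	enum foo_state state;	/* where the device is now */
	enum foo_state target;	/* where it should end up */
};

/* Each transition does its work and advances state by exactly one step. */
static int foo_new_to_known(struct foo_dev *d)   { d->state = FOO_KNOWN;  return 0; }
static int foo_known_to_basic(struct foo_dev *d) { d->state = FOO_BASIC;  return 0; }
static int foo_basic_to_ready(struct foo_dev *d) { d->state = FOO_READY;  return 0; }
static int foo_ready_to_online(struct foo_dev *d){ d->state = FOO_ONLINE; return 0; }
static int foo_online_to_ready(struct foo_dev *d){ d->state = FOO_READY;  return 0; }
static int foo_ready_to_basic(struct foo_dev *d) { d->state = FOO_BASIC;  return 0; }
static int foo_basic_to_known(struct foo_dev *d) { d->state = FOO_KNOWN;  return 0; }
static int foo_known_to_new(struct foo_dev *d)   { d->state = FOO_NEW;    return 0; }

static int foo_increase_state(struct foo_dev *d)
{
	int rc = 0;

	if (d->state == FOO_NEW && d->target >= FOO_KNOWN)
		rc = foo_new_to_known(d);
	if (!rc && d->state == FOO_KNOWN && d->target >= FOO_BASIC)
		rc = foo_known_to_basic(d);
	if (!rc && d->state == FOO_BASIC && d->target >= FOO_READY)
		rc = foo_basic_to_ready(d);
	if (!rc && d->state == FOO_READY && d->target >= FOO_ONLINE)
		rc = foo_ready_to_online(d);
	return rc;
}

static int foo_decrease_state(struct foo_dev *d)
{
	int rc = 0;

	if (d->state == FOO_ONLINE && d->target <= FOO_READY)
		rc = foo_online_to_ready(d);
	if (!rc && d->state == FOO_READY && d->target <= FOO_BASIC)
		rc = foo_ready_to_basic(d);
	if (!rc && d->state == FOO_BASIC && d->target <= FOO_KNOWN)
		rc = foo_basic_to_known(d);
	if (!rc && d->state == FOO_KNOWN && d->target <= FOO_NEW)
		rc = foo_known_to_new(d);
	return rc;
}

static void foo_change_state(struct foo_dev *d)
{
	int rc;

	if (d->state == d->target)
		return;
	if (d->state < d->target)
		rc = foo_increase_state(d);
	else
		rc = foo_decrease_state(d);
	if (rc)
		d->target = d->state;	/* a failed step pins the target */
}

Because each successful step advances device->state, one call runs through as many ladder rungs as are reachable, which is why a single dasd_change_state() call can take a device all the way from NEW to ONLINE.
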
540 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); in do_kick_device() local
541 mutex_lock(&device->state_mutex); in do_kick_device()
542 dasd_change_state(device); in do_kick_device()
543 mutex_unlock(&device->state_mutex); in do_kick_device()
544 dasd_schedule_device_bh(device); in do_kick_device()
545 dasd_put_device(device); in do_kick_device()
548 void dasd_kick_device(struct dasd_device *device) in dasd_kick_device() argument
550 dasd_get_device(device); in dasd_kick_device()
552 if (!schedule_work(&device->kick_work)) in dasd_kick_device()
553 dasd_put_device(device); in dasd_kick_device()
563 struct dasd_device *device = container_of(work, struct dasd_device, in do_reload_device() local
565 device->discipline->reload(device); in do_reload_device()
566 dasd_put_device(device); in do_reload_device()
569 void dasd_reload_device(struct dasd_device *device) in dasd_reload_device() argument
571 dasd_get_device(device); in dasd_reload_device()
573 if (!schedule_work(&device->reload_device)) in dasd_reload_device()
574 dasd_put_device(device); in dasd_reload_device()
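
dasd_kick_device() and dasd_reload_device() share one idiom: take a device reference before queueing the work item and drop it again if schedule_work() reports the item was already pending, so that exactly one reference is held for the single dasd_put_device() at the end of the work handler. A self-contained sketch of that pattern using a kref on a hypothetical object (the dasd driver itself counts references on the underlying ccw_device rather than embedding a kref):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct kref ref;
	/* Assumed to be set up once with INIT_WORK(&d->kick_work, do_kick_foo),
	 * just as dasd_alloc_device() initializes kick_work. */
	struct work_struct kick_work;
};

static void foo_release(struct kref *ref)
{
	kfree(container_of(ref, struct foo_dev, ref));
}

static void do_kick_foo(struct work_struct *work)
{
	struct foo_dev *d = container_of(work, struct foo_dev, kick_work);

	/* ... deferred processing, e.g. run the state machine ... */

	kref_put(&d->ref, foo_release);	/* balances the get in foo_kick() */
}

static void foo_kick(struct foo_dev *d)
{
	kref_get(&d->ref);
	/* schedule_work() returns false if the item was already queued;
	 * in that case no additional execution will happen, so drop the
	 * reference we just took. */
	if (!schedule_work(&d->kick_work))
		kref_put(&d->ref, foo_release);
}
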
581 void dasd_set_target_state(struct dasd_device *device, int target) in dasd_set_target_state() argument
583 dasd_get_device(device); in dasd_set_target_state()
584 mutex_lock(&device->state_mutex); in dasd_set_target_state()
588 if (device->target != target) { in dasd_set_target_state()
589 if (device->state == target) in dasd_set_target_state()
591 device->target = target; in dasd_set_target_state()
593 if (device->state != device->target) in dasd_set_target_state()
594 dasd_change_state(device); in dasd_set_target_state()
595 mutex_unlock(&device->state_mutex); in dasd_set_target_state()
596 dasd_put_device(device); in dasd_set_target_state()
602 static inline int _wait_for_device(struct dasd_device *device) in _wait_for_device() argument
604 return (device->state == device->target); in _wait_for_device()
607 void dasd_enable_device(struct dasd_device *device) in dasd_enable_device() argument
609 dasd_set_target_state(device, DASD_STATE_ONLINE); in dasd_enable_device()
610 if (device->state <= DASD_STATE_KNOWN) in dasd_enable_device()
612 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_enable_device()
614 wait_event(dasd_init_waitq, _wait_for_device(device)); in dasd_enable_device()
616 dasd_reload_device(device); in dasd_enable_device()
617 if (device->discipline->kick_validate) in dasd_enable_device()
618 device->discipline->kick_validate(device); in dasd_enable_device()
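
dasd_set_target_state() updates device->target under state_mutex and runs dasd_change_state(); dasd_enable_device() then sleeps on dasd_init_waitq until _wait_for_device() reports state == target, i.e. until the possibly asynchronous transitions have finished. A stripped-down sketch of that "set the target, let the state machine run, wait for it to arrive" flow, with hypothetical names and the transition ladder collapsed into a single step:

#include <linux/mutex.h>
#include <linux/wait.h>

/* Hypothetical device; state_mutex is assumed to be mutex_init()ed at
 * allocation time, as dasd_alloc_device() does for the real device. */
struct foo_dev {
	int state;
	int target;
	struct mutex state_mutex;
};

static DECLARE_WAIT_QUEUE_HEAD(foo_init_waitq);

static void foo_change_state(struct foo_dev *d)
{
	/* The real driver walks a transition ladder and may defer steps to
	 * the kick_work handler; here we jump straight to the target. */
	d->state = d->target;
	wake_up(&foo_init_waitq);	/* real code wakes waiters once state == target */
}

static void foo_set_target_state(struct foo_dev *d, int target)
{
	mutex_lock(&d->state_mutex);
	d->target = target;
	if (d->state != d->target)
		foo_change_state(d);
	mutex_unlock(&d->state_mutex);
}

static int foo_reached_target(struct foo_dev *d)
{
	return d->state == d->target;
}

static void foo_enable_device(struct foo_dev *d, int online_state)
{
	foo_set_target_state(d, online_state);
	wait_event(foo_init_waitq, foo_reached_target(d));
}
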
643 struct dasd_device *device; in dasd_profile_start() local
673 device = cqr->startdev; in dasd_profile_start()
674 if (!device->profile.data) in dasd_profile_start()
677 spin_lock(get_ccwdev_lock(device->cdev)); in dasd_profile_start()
679 list_for_each(l, &device->ccw_queue) in dasd_profile_start()
682 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_profile_start()
684 spin_lock(&device->profile.lock); in dasd_profile_start()
685 device->profile.data->dasd_io_nr_req[counter]++; in dasd_profile_start()
687 device->profile.data->dasd_read_nr_req[counter]++; in dasd_profile_start()
688 spin_unlock(&device->profile.lock); in dasd_profile_start()
755 struct dasd_device *device; in dasd_profile_end() local
760 device = cqr->startdev; in dasd_profile_end()
763 device->profile.data)) in dasd_profile_end()
822 spin_lock(&device->profile.lock); in dasd_profile_end()
823 if (device->profile.data) { in dasd_profile_end()
824 data = device->profile.data; in dasd_profile_end()
829 dasd_profile_end_add_data(device->profile.data, in dasd_profile_end()
838 spin_unlock(&device->profile.lock); in dasd_profile_end()
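
dasd_profile_start() counts how many requests are already on the device ccw_queue under the ccw device lock, then takes profile.lock and bumps the matching histogram bucket (dasd_io_nr_req, and dasd_read_nr_req for reads). A generic sketch of that two-lock counting pattern with a hypothetical profile structure and a plain spinlock standing in for get_ccwdev_lock():

#include <linux/list.h>
#include <linux/spinlock.h>

#define FOO_PROFILE_BUCKETS 32	/* hypothetical bucket count */

struct foo_profile {
	spinlock_t lock;
	unsigned int nr_req[FOO_PROFILE_BUCKETS];
};

struct foo_dev {
	spinlock_t lock;		/* protects ccw_queue */
	struct list_head ccw_queue;
	struct foo_profile profile;
};

static void foo_profile_start(struct foo_dev *d)
{
	struct list_head *l;
	unsigned int counter = 0;

	/* How many requests are already queued ahead of this one? */
	spin_lock(&d->lock);
	list_for_each(l, &d->ccw_queue)
		counter++;
	spin_unlock(&d->lock);

	if (counter >= FOO_PROFILE_BUCKETS)
		counter = FOO_PROFILE_BUCKETS - 1;

	/* The histogram has its own lock so profiling never holds the
	 * request-queue lock longer than the count itself. */
	spin_lock(&d->profile.lock);
	d->profile.nr_req[counter]++;
	spin_unlock(&d->profile.lock);
}
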
1130 struct dasd_device *device; in dasd_hosts_show() local
1133 device = m->private; in dasd_hosts_show()
1134 dasd_get_device(device); in dasd_hosts_show()
1136 if (device->discipline->hosts_print) in dasd_hosts_show()
1137 rc = device->discipline->hosts_print(device, m); in dasd_hosts_show()
1139 dasd_put_device(device); in dasd_hosts_show()
1145 static void dasd_hosts_exit(struct dasd_device *device) in dasd_hosts_exit() argument
1147 debugfs_remove(device->hosts_dentry); in dasd_hosts_exit()
1148 device->hosts_dentry = NULL; in dasd_hosts_exit()
1152 struct dasd_device *device) in dasd_hosts_init() argument
1162 device, &dasd_hosts_fops); in dasd_hosts_init()
1164 device->hosts_dentry = pde; in dasd_hosts_init()
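
dasd_hosts_init()/dasd_hosts_exit() are the usual debugfs pairing: create one read-only file whose show callback reaches the device through the seq_file private pointer (the data argument of debugfs_create_file()), and remove the dentry again on teardown. A hedged, generic sketch; the file name "hosts" and the foo_* names are placeholders, not the driver's real ones:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_dev {
	struct dentry *hosts_dentry;
};

static int foo_hosts_show(struct seq_file *m, void *v)
{
	struct foo_dev *d = m->private;	/* handed in at create time */

	seq_printf(m, "host access info for device %p\n", d);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_hosts);

static void foo_hosts_init(struct dentry *base_dentry, struct foo_dev *d)
{
	/* debugfs_create_file() returns an ERR_PTR on failure, which
	 * debugfs_remove() tolerates, so no error handling is needed. */
	d->hosts_dentry = debugfs_create_file("hosts", 0444, base_dentry,
					      d, &foo_hosts_fops);
}

static void foo_hosts_exit(struct foo_dev *d)
{
	debugfs_remove(d->hosts_dentry);
	d->hosts_dentry = NULL;
}
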
1168 struct dasd_device *device, in dasd_smalloc_request() argument
1182 spin_lock_irqsave(&device->mem_lock, flags); in dasd_smalloc_request()
1183 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); in dasd_smalloc_request()
1184 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_smalloc_request()
1204 dasd_get_device(device); in dasd_smalloc_request()
1211 struct dasd_device *device) in dasd_fmalloc_request() argument
1225 spin_lock_irqsave(&device->mem_lock, flags); in dasd_fmalloc_request()
1226 cqr = dasd_alloc_chunk(&device->ese_chunks, size); in dasd_fmalloc_request()
1227 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_fmalloc_request()
1246 dasd_get_device(device); in dasd_fmalloc_request()
1252 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_sfree_request() argument
1256 spin_lock_irqsave(&device->mem_lock, flags); in dasd_sfree_request()
1257 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); in dasd_sfree_request()
1258 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_sfree_request()
1259 dasd_put_device(device); in dasd_sfree_request()
1263 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_ffree_request() argument
1267 spin_lock_irqsave(&device->mem_lock, flags); in dasd_ffree_request()
1268 dasd_free_chunk(&device->ese_chunks, cqr); in dasd_ffree_request()
1269 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_ffree_request()
1270 dasd_put_device(device); in dasd_ffree_request()
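
dasd_smalloc_request()/dasd_fmalloc_request() carve a request out of a preallocated pool under mem_lock and pin the device with dasd_get_device() for the lifetime of the request; dasd_sfree_request()/dasd_ffree_request() return the chunk under the same lock and drop the reference. A sketch of that shape in which pool_alloc()/pool_free() are kmalloc-backed stand-ins for the driver's private chunk allocator (dasd_alloc_chunk/dasd_free_chunk), and a kref stands in for the device reference:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_req {
	void *mem_chunk;
};

struct foo_dev {
	spinlock_t mem_lock;
	struct kref ref;
};

/* Stand-ins: the real pool hands out pieces of the preallocated DMA pages. */
static void *pool_alloc(size_t size) { return kmalloc(size, GFP_ATOMIC); }
static void pool_free(void *chunk)   { kfree(chunk); }
static void foo_release(struct kref *ref) { kfree(container_of(ref, struct foo_dev, ref)); }

static struct foo_req *foo_alloc_request(struct foo_dev *d, size_t size)
{
	struct foo_req *req;
	unsigned long flags;
	void *chunk;

	spin_lock_irqsave(&d->mem_lock, flags);
	chunk = pool_alloc(size + sizeof(*req));
	spin_unlock_irqrestore(&d->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	req = chunk;
	req->mem_chunk = chunk;
	kref_get(&d->ref);	/* request holds a device reference until freed */
	return req;
}

static void foo_free_request(struct foo_req *req, struct foo_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->mem_lock, flags);
	pool_free(req->mem_chunk);
	spin_unlock_irqrestore(&d->mem_lock, flags);
	kref_put(&d->ref, foo_release);
}
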
1279 struct dasd_device *device; in dasd_check_cqr() local
1283 device = cqr->startdev; in dasd_check_cqr()
1284 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { in dasd_check_cqr()
1285 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_check_cqr()
1289 *(unsigned int *) device->discipline->name); in dasd_check_cqr()
1303 struct dasd_device *device; in dasd_term_IO() local
1312 device = (struct dasd_device *) cqr->startdev; in dasd_term_IO()
1314 rc = ccw_device_clear(device->cdev, (long) cqr); in dasd_term_IO()
1320 DBF_DEV_EVENT(DBF_DEBUG, device, in dasd_term_IO()
1325 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1338 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1346 dev_err(&device->cdev->dev, "An error occurred in the " in dasd_term_IO()
1353 dasd_schedule_device_bh(device); in dasd_term_IO()
1364 struct dasd_device *device; in dasd_start_IO() local
1374 device = (struct dasd_device *) cqr->startdev; in dasd_start_IO()
1377 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && in dasd_start_IO()
1379 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " in dasd_start_IO()
1388 dev_err(&device->cdev->dev, "An error occurred in the DASD " in dasd_start_IO()
1397 cqr->lpm &= dasd_path_get_opm(device); in dasd_start_IO()
1399 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1409 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1412 rc = ccw_device_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1420 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1432 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_start_IO()
1435 } else if (cqr->lpm != dasd_path_get_opm(device)) { in dasd_start_IO()
1436 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1437 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", in dasd_start_IO()
1441 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1444 dasd_generic_last_path_gone(device); in dasd_start_IO()
1445 dasd_path_no_path(device); in dasd_start_IO()
1446 dasd_path_set_tbvpm(device, in dasd_start_IO()
1448 device->cdev)); in dasd_start_IO()
1452 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1455 dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO); in dasd_start_IO()
1458 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1462 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1469 dev_err(&device->cdev->dev, in dasd_start_IO()
1491 struct dasd_device *device; in dasd_device_timeout() local
1493 device = from_timer(device, t, timer); in dasd_device_timeout()
1494 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_device_timeout()
1496 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); in dasd_device_timeout()
1497 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_device_timeout()
1498 dasd_schedule_device_bh(device); in dasd_device_timeout()
1504 void dasd_device_set_timer(struct dasd_device *device, int expires) in dasd_device_set_timer() argument
1507 del_timer(&device->timer); in dasd_device_set_timer()
1509 mod_timer(&device->timer, jiffies + expires); in dasd_device_set_timer()
1516 void dasd_device_clear_timer(struct dasd_device *device) in dasd_device_clear_timer() argument
1518 del_timer(&device->timer); in dasd_device_clear_timer()
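
The device timer helpers follow the standard timer_list idiom: timer_setup() binds the callback at allocation time, dasd_device_set_timer() treats expires == 0 as "cancel" and otherwise (re)arms with mod_timer(), and the callback recovers the device with from_timer(). A minimal sketch with a hypothetical device:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_dev {
	struct timer_list timer;
};

static void foo_device_timeout(struct timer_list *t)
{
	struct foo_dev *d = from_timer(d, t, timer);

	/* Runs in timer (softirq) context; the dasd callback clears the
	 * PENDING stop bit and schedules the device bottom half. */
	(void)d;
}

static void foo_device_init(struct foo_dev *d)
{
	timer_setup(&d->timer, foo_device_timeout, 0);
}

/* expires == 0 cancels the timer, anything else (re)arms it. */
static void foo_device_set_timer(struct foo_dev *d, int expires)
{
	if (expires == 0)
		del_timer(&d->timer);
	else
		mod_timer(&d->timer, jiffies + expires);
}

static void foo_device_clear_timer(struct foo_dev *d)
{
	del_timer(&d->timer);
}
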
1526 struct dasd_device *device; in dasd_handle_killed_request() local
1538 device = dasd_device_from_cdev_locked(cdev); in dasd_handle_killed_request()
1539 if (IS_ERR(device)) { in dasd_handle_killed_request()
1546 device != cqr->startdev || in dasd_handle_killed_request()
1551 dasd_put_device(device); in dasd_handle_killed_request()
1558 dasd_device_clear_timer(device); in dasd_handle_killed_request()
1559 dasd_schedule_device_bh(device); in dasd_handle_killed_request()
1560 dasd_put_device(device); in dasd_handle_killed_request()
1563 void dasd_generic_handle_state_change(struct dasd_device *device) in dasd_generic_handle_state_change() argument
1566 dasd_eer_snss(device); in dasd_generic_handle_state_change()
1568 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); in dasd_generic_handle_state_change()
1569 dasd_schedule_device_bh(device); in dasd_generic_handle_state_change()
1570 if (device->block) { in dasd_generic_handle_state_change()
1571 dasd_schedule_block_bh(device->block); in dasd_generic_handle_state_change()
1572 if (device->block->gdp) in dasd_generic_handle_state_change()
1573 blk_mq_run_hw_queues(device->block->gdp->queue, true); in dasd_generic_handle_state_change()
1587 struct dasd_device *device = NULL; in dasd_ese_needs_format() local
1592 device = block->base; in dasd_ese_needs_format()
1593 if (!device || !device->discipline->is_ese) in dasd_ese_needs_format()
1595 if (!device->discipline->is_ese(device)) in dasd_ese_needs_format()
1628 struct dasd_device *device; in dasd_int_handler() local
1641 device = cqr->startdev; in dasd_int_handler()
1643 dasd_device_clear_timer(device); in dasd_int_handler()
1645 dasd_schedule_device_bh(device); in dasd_int_handler()
1669 device = dasd_device_from_cdev_locked(cdev); in dasd_int_handler()
1670 if (IS_ERR(device)) in dasd_int_handler()
1673 if (device->discipline == dasd_diag_discipline_pointer) { in dasd_int_handler()
1674 dasd_put_device(device); in dasd_int_handler()
1697 dasd_generic_space_exhaust(device, cqr); in dasd_int_handler()
1698 device->discipline->ext_pool_exhaust(device, cqr); in dasd_int_handler()
1699 dasd_put_device(device); in dasd_int_handler()
1704 device->discipline->dump_sense_dbf(device, irb, "int"); in dasd_int_handler()
1706 if (device->features & DASD_FEATURE_ERPLOG) in dasd_int_handler()
1707 device->discipline->dump_sense(device, cqr, irb); in dasd_int_handler()
1708 device->discipline->check_for_device_change(device, cqr, irb); in dasd_int_handler()
1709 dasd_put_device(device); in dasd_int_handler()
1714 device = dasd_device_from_cdev_locked(cdev); in dasd_int_handler()
1715 if (!IS_ERR(device)) { in dasd_int_handler()
1716 device->discipline->check_attention(device, in dasd_int_handler()
1718 dasd_put_device(device); in dasd_int_handler()
1725 device = (struct dasd_device *) cqr->startdev; in dasd_int_handler()
1726 if (!device || in dasd_int_handler()
1727 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { in dasd_int_handler()
1740 device->discipline->ese_read(cqr, irb); in dasd_int_handler()
1743 dasd_device_clear_timer(device); in dasd_int_handler()
1744 dasd_schedule_device_bh(device); in dasd_int_handler()
1747 fcqr = device->discipline->ese_format(device, cqr, irb); in dasd_int_handler()
1758 dasd_schedule_device_bh(device); in dasd_int_handler()
1763 list_add(&fcqr->devlist, &device->ccw_queue); in dasd_int_handler()
1764 dasd_schedule_device_bh(device); in dasd_int_handler()
1773 dasd_device_clear_timer(device); in dasd_int_handler()
1775 dasd_schedule_device_bh(device); in dasd_int_handler()
1781 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " in dasd_int_handler()
1794 if (cqr->devlist.next != &device->ccw_queue) { in dasd_int_handler()
1804 device->discipline->handle_hpf_error) in dasd_int_handler()
1805 device->discipline->handle_hpf_error(device, irb); in dasd_int_handler()
1812 if (cqr->lpm == dasd_path_get_opm(device)) in dasd_int_handler()
1813 DBF_DEV_EVENT(DBF_DEBUG, device, in dasd_int_handler()
1818 cqr->lpm = dasd_path_get_opm(device); in dasd_int_handler()
1825 (!device->stopped)) { in dasd_int_handler()
1826 if (device->discipline->start_IO(next) == 0) in dasd_int_handler()
1830 dasd_device_set_timer(device, expires); in dasd_int_handler()
1832 dasd_device_clear_timer(device); in dasd_int_handler()
1833 dasd_schedule_device_bh(device); in dasd_int_handler()
1839 struct dasd_device *device; in dasd_generic_uc_handler() local
1841 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_uc_handler()
1843 if (IS_ERR(device)) in dasd_generic_uc_handler()
1845 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || in dasd_generic_uc_handler()
1846 device->state != device->target || in dasd_generic_uc_handler()
1847 !device->discipline->check_for_device_change){ in dasd_generic_uc_handler()
1848 dasd_put_device(device); in dasd_generic_uc_handler()
1851 if (device->discipline->dump_sense_dbf) in dasd_generic_uc_handler()
1852 device->discipline->dump_sense_dbf(device, irb, "uc"); in dasd_generic_uc_handler()
1853 device->discipline->check_for_device_change(device, NULL, irb); in dasd_generic_uc_handler()
1854 dasd_put_device(device); in dasd_generic_uc_handler()
1864 static void __dasd_device_recovery(struct dasd_device *device, in __dasd_device_recovery() argument
1876 list_for_each_safe(l, n, &device->ccw_queue) { in __dasd_device_recovery()
1889 static void __dasd_device_process_ccw_queue(struct dasd_device *device, in __dasd_device_process_ccw_queue() argument
1896 list_for_each_safe(l, n, &device->ccw_queue) { in __dasd_device_process_ccw_queue()
1905 __dasd_device_recovery(device, cqr); in __dasd_device_process_ccw_queue()
1912 static void __dasd_process_cqr(struct dasd_device *device, in __dasd_process_cqr() argument
1930 dev_err(&device->cdev->dev, in __dasd_process_cqr()
1943 static void __dasd_device_process_final_queue(struct dasd_device *device, in __dasd_device_process_final_queue() argument
1955 __dasd_process_cqr(device, cqr); in __dasd_device_process_final_queue()
1958 __dasd_process_cqr(device, cqr); in __dasd_device_process_final_queue()
1967 static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device, in __dasd_device_check_autoquiesce_timeout() argument
1970 if ((device->default_retries - cqr->retries) >= device->aq_timeouts) in __dasd_device_check_autoquiesce_timeout()
1971 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS); in __dasd_device_check_autoquiesce_timeout()
1978 static void __dasd_device_check_expire(struct dasd_device *device) in __dasd_device_check_expire() argument
1982 if (list_empty(&device->ccw_queue)) in __dasd_device_check_expire()
1984 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_check_expire()
1987 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in __dasd_device_check_expire()
1994 if (device->discipline->term_IO(cqr) != 0) { in __dasd_device_check_expire()
1996 dev_err(&device->cdev->dev, in __dasd_device_check_expire()
2001 dasd_device_set_timer(device, 5*HZ); in __dasd_device_check_expire()
2003 dev_err(&device->cdev->dev, in __dasd_device_check_expire()
2008 __dasd_device_check_autoquiesce_timeout(device, cqr); in __dasd_device_check_expire()
2015 static int __dasd_device_is_unusable(struct dasd_device *device, in __dasd_device_is_unusable() argument
2020 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && in __dasd_device_is_unusable()
2021 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in __dasd_device_is_unusable()
2028 if (device->stopped) { in __dasd_device_is_unusable()
2029 if (device->stopped & mask) { in __dasd_device_is_unusable()
2047 static void __dasd_device_start_head(struct dasd_device *device) in __dasd_device_start_head() argument
2052 if (list_empty(&device->ccw_queue)) in __dasd_device_start_head()
2054 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_start_head()
2058 if (__dasd_device_is_unusable(device, cqr)) { in __dasd_device_start_head()
2061 dasd_schedule_device_bh(device); in __dasd_device_start_head()
2065 rc = device->discipline->start_IO(cqr); in __dasd_device_start_head()
2067 dasd_device_set_timer(device, cqr->expires); in __dasd_device_start_head()
2069 dasd_schedule_device_bh(device); in __dasd_device_start_head()
2072 dasd_device_set_timer(device, 50); in __dasd_device_start_head()
2075 static void __dasd_device_check_path_events(struct dasd_device *device) in __dasd_device_check_path_events() argument
2080 tbvpm = dasd_path_get_tbvpm(device); in __dasd_device_check_path_events()
2081 fcsecpm = dasd_path_get_fcsecpm(device); in __dasd_device_check_path_events()
2086 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) in __dasd_device_check_path_events()
2089 dasd_path_clear_all_verify(device); in __dasd_device_check_path_events()
2090 dasd_path_clear_all_fcsec(device); in __dasd_device_check_path_events()
2092 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); in __dasd_device_check_path_events()
2094 dasd_path_add_tbvpm(device, tbvpm); in __dasd_device_check_path_events()
2095 dasd_path_add_fcsecpm(device, fcsecpm); in __dasd_device_check_path_events()
2096 dasd_device_set_timer(device, 50); in __dasd_device_check_path_events()
2110 int dasd_flush_device_queue(struct dasd_device *device) in dasd_flush_device_queue() argument
2117 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_flush_device_queue()
2119 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { in dasd_flush_device_queue()
2123 rc = device->discipline->term_IO(cqr); in dasd_flush_device_queue()
2126 dev_err(&device->cdev->dev, in dasd_flush_device_queue()
2143 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_flush_device_queue()
2156 __dasd_device_process_final_queue(device, &flush_queue); in dasd_flush_device_queue()
2166 struct dasd_device *device = (struct dasd_device *) data; in dasd_device_tasklet() local
2169 atomic_set (&device->tasklet_scheduled, 0); in dasd_device_tasklet()
2171 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2173 __dasd_device_check_expire(device); in dasd_device_tasklet()
2175 __dasd_device_process_ccw_queue(device, &final_queue); in dasd_device_tasklet()
2176 __dasd_device_check_path_events(device); in dasd_device_tasklet()
2177 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2179 __dasd_device_process_final_queue(device, &final_queue); in dasd_device_tasklet()
2180 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2182 __dasd_device_start_head(device); in dasd_device_tasklet()
2183 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2186 dasd_put_device(device); in dasd_device_tasklet()
2192 void dasd_schedule_device_bh(struct dasd_device *device) in dasd_schedule_device_bh() argument
2195 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) in dasd_schedule_device_bh()
2197 dasd_get_device(device); in dasd_schedule_device_bh()
2198 tasklet_hi_schedule(&device->tasklet); in dasd_schedule_device_bh()
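
dasd_device_tasklet() shows the driver's bottom-half shape: clear the tasklet_scheduled flag, collect finished requests from the device ccw_queue into a private final queue under the ccw device lock, run their callbacks with the lock dropped, then retake the lock to start the next request; dasd_schedule_device_bh() uses atomic_cmpxchg() so the tasklet is scheduled at most once until it has actually run. A generic sketch of both halves, with a plain spinlock standing in for get_ccwdev_lock() and the tasklet assumed to have been set up with tasklet_init(&d->tasklet, foo_device_tasklet, (unsigned long)d) as in dasd_alloc_device():

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_req {
	struct list_head devlist;
	void (*callback)(struct foo_req *);
	int done;
};

struct foo_dev {
	spinlock_t lock;
	atomic_t tasklet_scheduled;
	struct tasklet_struct tasklet;
	struct list_head ccw_queue;
};

static void foo_start_head(struct foo_dev *d) { /* start first queued request */ }

static void foo_device_tasklet(unsigned long data)
{
	struct foo_dev *d = (struct foo_dev *)data;
	struct foo_req *req, *n;
	LIST_HEAD(final_queue);

	atomic_set(&d->tasklet_scheduled, 0);

	spin_lock_irq(&d->lock);
	list_for_each_entry_safe(req, n, &d->ccw_queue, devlist)
		if (req->done)
			list_move_tail(&req->devlist, &final_queue);
	spin_unlock_irq(&d->lock);

	/* Callbacks may take other locks or requeue work, so run them
	 * without the device lock held. */
	list_for_each_entry_safe(req, n, &final_queue, devlist) {
		list_del_init(&req->devlist);
		req->callback(req);
	}

	spin_lock_irq(&d->lock);
	foo_start_head(d);
	spin_unlock_irq(&d->lock);
}

/* Schedule the bottom half at most once until it has run. */
static void foo_schedule_bh(struct foo_dev *d)
{
	if (atomic_cmpxchg(&d->tasklet_scheduled, 0, 1) != 0)
		return;
	tasklet_hi_schedule(&d->tasklet);
}
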
2202 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) in dasd_device_set_stop_bits() argument
2204 device->stopped |= bits; in dasd_device_set_stop_bits()
2208 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) in dasd_device_remove_stop_bits() argument
2210 device->stopped &= ~bits; in dasd_device_remove_stop_bits()
2211 if (!device->stopped) in dasd_device_remove_stop_bits()
2222 struct dasd_device *device; in dasd_add_request_head() local
2225 device = cqr->startdev; in dasd_add_request_head()
2226 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_head()
2228 list_add(&cqr->devlist, &device->ccw_queue); in dasd_add_request_head()
2230 dasd_schedule_device_bh(device); in dasd_add_request_head()
2231 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_head()
2241 struct dasd_device *device; in dasd_add_request_tail() local
2244 device = cqr->startdev; in dasd_add_request_tail()
2245 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_tail()
2247 list_add_tail(&cqr->devlist, &device->ccw_queue); in dasd_add_request_tail()
2249 dasd_schedule_device_bh(device); in dasd_add_request_tail()
2250 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_tail()
2268 struct dasd_device *device; in _wait_for_wakeup() local
2271 device = cqr->startdev; in _wait_for_wakeup()
2272 spin_lock_irq(get_ccwdev_lock(device->cdev)); in _wait_for_wakeup()
2274 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in _wait_for_wakeup()
2283 struct dasd_device *device; in __dasd_sleep_on_erp() local
2288 device = cqr->startdev; in __dasd_sleep_on_erp()
2291 device->discipline->handle_terminated_request(cqr); in __dasd_sleep_on_erp()
2295 erp_fn = device->discipline->erp_action(cqr); in __dasd_sleep_on_erp()
2302 __dasd_process_erp(device, cqr); in __dasd_sleep_on_erp()
2322 struct dasd_device *device; in _dasd_sleep_on() local
2329 device = maincqr->startdev; in _dasd_sleep_on()
2339 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in _dasd_sleep_on()
2346 if (device->stopped & ~DASD_STOPPED_PENDING && in _dasd_sleep_on()
2348 !dasd_eer_enabled(device) && device->aq_mask == 0) { in _dasd_sleep_on()
2357 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { in _dasd_sleep_on()
2369 generic_waitq, !(device->stopped)); in _dasd_sleep_on()
2376 wait_event(generic_waitq, !(device->stopped)); in _dasd_sleep_on()
2426 struct dasd_device *device; in _dasd_sleep_on_queue() local
2433 device = cqr->startdev; in _dasd_sleep_on_queue()
2437 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in _dasd_sleep_on_queue()
2444 if (device->stopped & ~DASD_STOPPED_PENDING && in _dasd_sleep_on_queue()
2446 !dasd_eer_enabled(device)) { in _dasd_sleep_on_queue()
2455 generic_waitq, !device->stopped); in _dasd_sleep_on_queue()
2462 wait_event(generic_waitq, !(device->stopped)); in _dasd_sleep_on_queue()
2554 static inline int _dasd_term_running_cqr(struct dasd_device *device) in _dasd_term_running_cqr() argument
2559 if (list_empty(&device->ccw_queue)) in _dasd_term_running_cqr()
2561 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in _dasd_term_running_cqr()
2562 rc = device->discipline->term_IO(cqr); in _dasd_term_running_cqr()
2575 struct dasd_device *device; in dasd_sleep_on_immediatly() local
2578 device = cqr->startdev; in dasd_sleep_on_immediatly()
2579 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in dasd_sleep_on_immediatly()
2585 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2586 rc = _dasd_term_running_cqr(device); in dasd_sleep_on_immediatly()
2588 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2598 list_add(&cqr->devlist, device->ccw_queue.next); in dasd_sleep_on_immediatly()
2601 dasd_schedule_device_bh(device); in dasd_sleep_on_immediatly()
2603 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2615 dasd_schedule_device_bh(device); in dasd_sleep_on_immediatly()
2616 if (device->block) in dasd_sleep_on_immediatly()
2617 dasd_schedule_block_bh(device->block); in dasd_sleep_on_immediatly()
2634 struct dasd_device *device = cqr->startdev; in __dasd_cancel_req() local
2644 rc = device->discipline->term_IO(cqr); in __dasd_cancel_req()
2646 dev_err(&device->cdev->dev, in __dasd_cancel_req()
2656 dasd_schedule_device_bh(device); in __dasd_cancel_req()
2662 struct dasd_device *device = cqr->startdev; in dasd_cancel_req() local
2666 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_cancel_req()
2668 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_cancel_req()
2719 static void __dasd_process_erp(struct dasd_device *device, in __dasd_process_erp() argument
2725 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); in __dasd_process_erp()
2727 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); in __dasd_process_erp()
2728 erp_fn = device->discipline->erp_postaction(cqr); in __dasd_process_erp()
3162 struct dasd_device *device; in dasd_times_out() local
3172 device = cqr->startdev ? cqr->startdev : block->base; in dasd_times_out()
3173 if (!device->blk_timeout) { in dasd_times_out()
3177 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_times_out()
3182 spin_lock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3219 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3386 int dasd_device_is_ro(struct dasd_device *device) in dasd_device_is_ro() argument
3394 ccw_device_get_id(device->cdev, &dev_id); in dasd_device_is_ro()
3439 void dasd_generic_free_discipline(struct dasd_device *device) in dasd_generic_free_discipline() argument
3442 if (device->discipline) { in dasd_generic_free_discipline()
3443 if (device->discipline->uncheck_device) in dasd_generic_free_discipline()
3444 device->discipline->uncheck_device(device); in dasd_generic_free_discipline()
3445 module_put(device->discipline->owner); in dasd_generic_free_discipline()
3446 device->discipline = NULL; in dasd_generic_free_discipline()
3448 if (device->base_discipline) { in dasd_generic_free_discipline()
3449 module_put(device->base_discipline->owner); in dasd_generic_free_discipline()
3450 device->base_discipline = NULL; in dasd_generic_free_discipline()
3461 struct dasd_device *device; in dasd_generic_remove() local
3464 device = dasd_device_from_cdev(cdev); in dasd_generic_remove()
3465 if (IS_ERR(device)) in dasd_generic_remove()
3468 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && in dasd_generic_remove()
3469 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_remove()
3471 dasd_put_device(device); in dasd_generic_remove()
3479 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_remove()
3482 block = device->block; in dasd_generic_remove()
3483 dasd_delete_device(device); in dasd_generic_remove()
3502 struct dasd_device *device; in dasd_generic_set_online() local
3503 struct device *dev; in dasd_generic_set_online()
3510 device = dasd_create_device(cdev); in dasd_generic_set_online()
3511 if (IS_ERR(device)) in dasd_generic_set_online()
3512 return PTR_ERR(device); in dasd_generic_set_online()
3515 if (device->features & DASD_FEATURE_USEDIAG) { in dasd_generic_set_online()
3524 dasd_delete_device(device); in dasd_generic_set_online()
3532 dasd_delete_device(device); in dasd_generic_set_online()
3538 dasd_delete_device(device); in dasd_generic_set_online()
3541 device->base_discipline = base_discipline; in dasd_generic_set_online()
3543 dasd_delete_device(device); in dasd_generic_set_online()
3546 device->discipline = discipline; in dasd_generic_set_online()
3549 rc = discipline->check_device(device); in dasd_generic_set_online()
3553 dasd_delete_device(device); in dasd_generic_set_online()
3557 dasd_set_target_state(device, DASD_STATE_ONLINE); in dasd_generic_set_online()
3558 if (device->state <= DASD_STATE_KNOWN) { in dasd_generic_set_online()
3561 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_set_online()
3562 if (device->block) in dasd_generic_set_online()
3563 dasd_free_block(device->block); in dasd_generic_set_online()
3564 dasd_delete_device(device); in dasd_generic_set_online()
3569 wait_event(dasd_init_waitq, _wait_for_device(device)); in dasd_generic_set_online()
3571 dasd_put_device(device); in dasd_generic_set_online()
3579 struct dasd_device *device; in dasd_generic_set_offline() local
3582 struct device *dev; in dasd_generic_set_offline()
3588 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_set_offline()
3589 if (IS_ERR(device)) { in dasd_generic_set_offline()
3591 return PTR_ERR(device); in dasd_generic_set_offline()
3600 if (device->block) { in dasd_generic_set_offline()
3601 max_count = device->block->bdev ? 0 : -1; in dasd_generic_set_offline()
3602 open_count = atomic_read(&device->block->open_count); in dasd_generic_set_offline()
3620 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { in dasd_generic_set_offline()
3621 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3623 &device->flags); in dasd_generic_set_offline()
3629 set_bit(DASD_FLAG_OFFLINE, &device->flags); in dasd_generic_set_offline()
3636 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && in dasd_generic_set_offline()
3637 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3646 if (device->block) in dasd_generic_set_offline()
3647 bdev_mark_dead(device->block->bdev, false); in dasd_generic_set_offline()
3648 dasd_schedule_device_bh(device); in dasd_generic_set_offline()
3650 _wait_for_empty_queues(device)); in dasd_generic_set_offline()
3662 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3666 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); in dasd_generic_set_offline()
3670 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_set_offline()
3672 block = device->block; in dasd_generic_set_offline()
3673 dasd_delete_device(device); in dasd_generic_set_offline()
3686 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); in dasd_generic_set_offline()
3687 clear_bit(DASD_FLAG_OFFLINE, &device->flags); in dasd_generic_set_offline()
3689 dasd_put_device(device); in dasd_generic_set_offline()
3695 int dasd_generic_last_path_gone(struct dasd_device *device) in dasd_generic_last_path_gone() argument
3699 dev_warn(&device->cdev->dev, "No operational channel path is left " in dasd_generic_last_path_gone()
3701 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); in dasd_generic_last_path_gone()
3703 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); in dasd_generic_last_path_gone()
3705 if (device->state < DASD_STATE_BASIC) in dasd_generic_last_path_gone()
3708 list_for_each_entry(cqr, &device->ccw_queue, devlist) in dasd_generic_last_path_gone()
3714 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); in dasd_generic_last_path_gone()
3715 dasd_device_clear_timer(device); in dasd_generic_last_path_gone()
3716 dasd_schedule_device_bh(device); in dasd_generic_last_path_gone()
3721 int dasd_generic_path_operational(struct dasd_device *device) in dasd_generic_path_operational() argument
3723 dev_info(&device->cdev->dev, "A channel path to the device has become " in dasd_generic_path_operational()
3725 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); in dasd_generic_path_operational()
3726 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); in dasd_generic_path_operational()
3727 dasd_schedule_device_bh(device); in dasd_generic_path_operational()
3728 if (device->block) { in dasd_generic_path_operational()
3729 dasd_schedule_block_bh(device->block); in dasd_generic_path_operational()
3730 if (device->block->gdp) in dasd_generic_path_operational()
3731 blk_mq_run_hw_queues(device->block->gdp->queue, true); in dasd_generic_path_operational()
3734 if (!device->stopped) in dasd_generic_path_operational()
3743 struct dasd_device *device; in dasd_generic_notify() local
3746 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_notify()
3747 if (IS_ERR(device)) in dasd_generic_notify()
3754 dasd_path_no_path(device); in dasd_generic_notify()
3755 ret = dasd_generic_last_path_gone(device); in dasd_generic_notify()
3759 if (dasd_path_get_opm(device)) in dasd_generic_notify()
3760 ret = dasd_generic_path_operational(device); in dasd_generic_notify()
3763 dasd_put_device(device); in dasd_generic_notify()
3770 struct dasd_device *device; in dasd_generic_path_event() local
3773 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_path_event()
3774 if (IS_ERR(device)) in dasd_generic_path_event()
3777 oldopm = dasd_path_get_opm(device); in dasd_generic_path_event()
3780 dasd_path_notoper(device, chp); in dasd_generic_path_event()
3783 dasd_path_available(device, chp); in dasd_generic_path_event()
3784 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3787 if (!dasd_path_is_operational(device, chp) && in dasd_generic_path_event()
3788 !dasd_path_need_verify(device, chp)) { in dasd_generic_path_event()
3794 dasd_path_available(device, chp); in dasd_generic_path_event()
3795 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3797 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_generic_path_event()
3799 if (device->discipline->kick_validate) in dasd_generic_path_event()
3800 device->discipline->kick_validate(device); in dasd_generic_path_event()
3803 dasd_path_fcsec_update(device, chp); in dasd_generic_path_event()
3804 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3807 hpfpm = dasd_path_get_hpfpm(device); in dasd_generic_path_event()
3808 ifccpm = dasd_path_get_ifccpm(device); in dasd_generic_path_event()
3809 if (!dasd_path_get_opm(device) && hpfpm) { in dasd_generic_path_event()
3815 if (device->discipline->disable_hpf) in dasd_generic_path_event()
3816 device->discipline->disable_hpf(device); in dasd_generic_path_event()
3817 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); in dasd_generic_path_event()
3818 dasd_path_set_tbvpm(device, hpfpm); in dasd_generic_path_event()
3819 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3820 dasd_schedule_requeue(device); in dasd_generic_path_event()
3821 } else if (!dasd_path_get_opm(device) && ifccpm) { in dasd_generic_path_event()
3827 dasd_path_set_tbvpm(device, ifccpm); in dasd_generic_path_event()
3828 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3830 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { in dasd_generic_path_event()
3831 dev_warn(&device->cdev->dev, in dasd_generic_path_event()
3833 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_generic_path_event()
3836 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); in dasd_generic_path_event()
3837 dasd_device_set_stop_bits(device, in dasd_generic_path_event()
3840 dasd_put_device(device); in dasd_generic_path_event()
3844 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) in dasd_generic_verify_path() argument
3846 if (!dasd_path_get_opm(device) && lpm) { in dasd_generic_verify_path()
3847 dasd_path_set_opm(device, lpm); in dasd_generic_verify_path()
3848 dasd_generic_path_operational(device); in dasd_generic_verify_path()
3850 dasd_path_add_opm(device, lpm); in dasd_generic_verify_path()
3855 void dasd_generic_space_exhaust(struct dasd_device *device, in dasd_generic_space_exhaust() argument
3859 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC); in dasd_generic_space_exhaust()
3861 if (device->state < DASD_STATE_BASIC) in dasd_generic_space_exhaust()
3869 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); in dasd_generic_space_exhaust()
3870 dasd_device_clear_timer(device); in dasd_generic_space_exhaust()
3871 dasd_schedule_device_bh(device); in dasd_generic_space_exhaust()
3875 void dasd_generic_space_avail(struct dasd_device *device) in dasd_generic_space_avail() argument
3877 dev_info(&device->cdev->dev, "Extent pool space is available\n"); in dasd_generic_space_avail()
3878 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); in dasd_generic_space_avail()
3880 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); in dasd_generic_space_avail()
3881 dasd_schedule_device_bh(device); in dasd_generic_space_avail()
3883 if (device->block) { in dasd_generic_space_avail()
3884 dasd_schedule_block_bh(device->block); in dasd_generic_space_avail()
3885 if (device->block->gdp) in dasd_generic_space_avail()
3886 blk_mq_run_hw_queues(device->block->gdp->queue, true); in dasd_generic_space_avail()
3888 if (!device->stopped) in dasd_generic_space_avail()
3896 int dasd_generic_requeue_all_requests(struct dasd_device *device) in dasd_generic_requeue_all_requests() argument
3898 struct dasd_block *block = device->block; in dasd_generic_requeue_all_requests()
3928 dasd_schedule_device_bh(device); in dasd_generic_requeue_all_requests()
3935 struct dasd_device *device = container_of(work, struct dasd_device, in do_requeue_requests() local
3937 dasd_generic_requeue_all_requests(device); in do_requeue_requests()
3938 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC); in do_requeue_requests()
3939 if (device->block) in do_requeue_requests()
3940 dasd_schedule_block_bh(device->block); in do_requeue_requests()
3941 dasd_put_device(device); in do_requeue_requests()
3944 void dasd_schedule_requeue(struct dasd_device *device) in dasd_schedule_requeue() argument
3946 dasd_get_device(device); in dasd_schedule_requeue()
3948 if (!schedule_work(&device->requeue_requests)) in dasd_schedule_requeue()
3949 dasd_put_device(device); in dasd_schedule_requeue()
3953 static int dasd_handle_autoquiesce(struct dasd_device *device, in dasd_handle_autoquiesce() argument
3958 if (dasd_eer_enabled(device)) in dasd_handle_autoquiesce()
3959 dasd_eer_write(device, cqr, reason); in dasd_handle_autoquiesce()
3961 if (!test_bit(reason, &device->aq_mask)) in dasd_handle_autoquiesce()
3965 if (dasd_eer_enabled(device)) in dasd_handle_autoquiesce()
3966 dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE); in dasd_handle_autoquiesce()
3968 dev_info(&device->cdev->dev, in dasd_handle_autoquiesce()
3970 dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE); in dasd_handle_autoquiesce()
3972 if (device->features & DASD_FEATURE_REQUEUEQUIESCE) in dasd_handle_autoquiesce()
3973 dasd_schedule_requeue(device); in dasd_handle_autoquiesce()
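
dasd_handle_autoquiesce() only quiesces the device if the triggering reason bit is set in the per-device aq_mask; it then sets the QUIESCE stop bit and, if the requeue-on-quiesce feature is enabled, schedules the requeue work. A small sketch of that decision, with hypothetical constants standing in for DASD_STOPPED_QUIESCE and DASD_FEATURE_REQUEUEQUIESCE and the EER logging omitted:

#include <linux/bitops.h>

#define FOO_STOPPED_QUIESCE	0x01	/* hypothetical stop bit */
#define FOO_FEATURE_REQUEUE	0x02	/* hypothetical feature flag */

struct foo_dev {
	unsigned long aq_mask;	/* one bit per autoquiesce reason */
	unsigned int stopped;	/* stop-bit mask */
	unsigned int features;
};

static void foo_schedule_requeue(struct foo_dev *d) { /* deferred requeue of all requests */ }

static int foo_handle_autoquiesce(struct foo_dev *d, int reason)
{
	if (!test_bit(reason, &d->aq_mask))
		return 0;	/* this reason does not trigger a quiesce */

	d->stopped |= FOO_STOPPED_QUIESCE;
	if (d->features & FOO_FEATURE_REQUEUE)
		foo_schedule_requeue(d);
	return 1;
}
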
3978 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, in dasd_generic_build_rdc() argument
3985 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, in dasd_generic_build_rdc()
3990 dev_err(&device->cdev->dev, in dasd_generic_build_rdc()
4001 cqr->startdev = device; in dasd_generic_build_rdc()
4002 cqr->memdev = device; in dasd_generic_build_rdc()
4011 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, in dasd_generic_read_dev_chars() argument
4017 cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic); in dasd_generic_read_dev_chars()
4064 struct dasd_device *device; in dasd_generic_shutdown() local
4066 device = dasd_device_from_cdev(cdev); in dasd_generic_shutdown()
4067 if (IS_ERR(device)) in dasd_generic_shutdown()
4070 if (device->block) in dasd_generic_shutdown()
4071 dasd_schedule_block_bh(device->block); in dasd_generic_shutdown()
4073 dasd_schedule_device_bh(device); in dasd_generic_shutdown()
4075 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); in dasd_generic_shutdown()