dasd.c (de3e0da1270bccb046885fbf1baa9140721de7e0, old) | dasd.c (8e09f21574ea3028d5629e5de759e0b196c690c5, new) |
---|---|
1/* 2 * File...........: linux/drivers/s390/block/dasd.c 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 --- 34 unchanged lines hidden (view full) --- 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 44 " Copyright 2000 IBM Corporation"); 45MODULE_SUPPORTED_DEVICE("dasd"); 46MODULE_LICENSE("GPL"); 47 48/* 49 * SECTION: prototypes for static functions of dasd.c 50 */ | 1/* 2 * File...........: linux/drivers/s390/block/dasd.c 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 --- 34 unchanged lines hidden (view full) --- 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 44 " Copyright 2000 IBM Corporation"); 45MODULE_SUPPORTED_DEVICE("dasd"); 46MODULE_LICENSE("GPL"); 47 48/* 49 * SECTION: prototypes for static functions of dasd.c 50 */ |
51static int dasd_alloc_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device); 54static void dasd_flush_request_queue(struct dasd_device *); 55static int dasd_flush_ccw_queue(struct dasd_device *, int); 56static void dasd_tasklet(struct dasd_device *); | 51static int dasd_alloc_queue(struct dasd_block *); 52static void dasd_setup_queue(struct dasd_block *); 53static void dasd_free_queue(struct dasd_block *); 54static void dasd_flush_request_queue(struct dasd_block *); 55static int dasd_flush_block_queue(struct dasd_block *); 56static void dasd_device_tasklet(struct dasd_device *); 57static void dasd_block_tasklet(struct dasd_block *); |
57static void do_kick_device(struct work_struct *); | 58static void do_kick_device(struct work_struct *); |
59static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); |
|
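The changed prototypes above are the patch in miniature: the driver is split into a device level (struct dasd_device, which talks to the channel subsystem) and a block level (struct dasd_block, which talks to the Linux block layer), each with its own ccw queue, tasklet and timer, plus a new callback (dasd_return_cqr_cb) that hands finished requests back to the block layer. A hedged sketch of the resulting relationship, built only from field names that are visible in this diff; the authoritative definitions live in dasd_int.h, which is not part of this hunk:

        struct dasd_block {
                struct dasd_device *base;      /* channel device carrying the I/O */
                struct gendisk *gdp;
                spinlock_t request_queue_lock;
                spinlock_t queue_lock;
                struct list_head ccw_queue;    /* block-level requests */
                struct tasklet_struct tasklet; /* runs dasd_block_tasklet */
                struct timer_list timer;
                atomic_t open_count;
        };

        struct dasd_device {
                struct dasd_block *block;      /* may be NULL; hence the
                                                * "if (device->block)" checks
                                                * throughout this patch */
                struct list_head ccw_queue;    /* device-level requests */
                struct tasklet_struct tasklet; /* runs dasd_device_tasklet */
                struct timer_list timer;
        };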
58 59/* 60 * SECTION: Operations on the device structure. 61 */ 62static wait_queue_head_t dasd_init_waitq; 63static wait_queue_head_t dasd_flush_wq; 64 65/* 66 * Allocate memory for a new device structure. 67 */ | 60 61/* 62 * SECTION: Operations on the device structure. 63 */ 64static wait_queue_head_t dasd_init_waitq; 65static wait_queue_head_t dasd_flush_wq; 66 67/* 68 * Allocate memory for a new device structure. 69 */ |
68struct dasd_device * 69dasd_alloc_device(void) | 70struct dasd_device *dasd_alloc_device(void) |
70{ 71 struct dasd_device *device; 72 | 71{ 72 struct dasd_device *device; 73 |
73 device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); 74 if (device == NULL) | 74 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 75 if (!device) |
75 return ERR_PTR(-ENOMEM); | 76 return ERR_PTR(-ENOMEM); |
76 /* open_count = 0 means device online but not in use */ 77 atomic_set(&device->open_count, -1); | |
78 79 /* Get two pages for normal block device operations. */ 80 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); | 77 78 /* Get two pages for normal block device operations. */ 79 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); |
81 if (device->ccw_mem == NULL) { | 80 if (!device->ccw_mem) { |
82 kfree(device); 83 return ERR_PTR(-ENOMEM); 84 } 85 /* Get one page for error recovery. */ 86 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); | 81 kfree(device); 82 return ERR_PTR(-ENOMEM); 83 } 84 /* Get one page for error recovery. */ 85 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); |
87 if (device->erp_mem == NULL) { | 86 if (!device->erp_mem) { |
88 free_pages((unsigned long) device->ccw_mem, 1); 89 kfree(device); 90 return ERR_PTR(-ENOMEM); 91 } 92 93 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 94 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 95 spin_lock_init(&device->mem_lock); | 87 free_pages((unsigned long) device->ccw_mem, 1); 88 kfree(device); 89 return ERR_PTR(-ENOMEM); 90 } 91 92 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 93 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 94 spin_lock_init(&device->mem_lock); |
96 spin_lock_init(&device->request_queue_lock); 97 atomic_set (&device->tasklet_scheduled, 0); | 95 atomic_set(&device->tasklet_scheduled, 0); |
98 tasklet_init(&device->tasklet, | 96 tasklet_init(&device->tasklet, |
99 (void (*)(unsigned long)) dasd_tasklet, | 97 (void (*)(unsigned long)) dasd_device_tasklet, |
100 (unsigned long) device); 101 INIT_LIST_HEAD(&device->ccw_queue); 102 init_timer(&device->timer); 103 INIT_WORK(&device->kick_work, do_kick_device); 104 device->state = DASD_STATE_NEW; 105 device->target = DASD_STATE_NEW; 106 107 return device; 108} 109 110/* 111 * Free memory of a device structure. 112 */ | 98 (unsigned long) device); 99 INIT_LIST_HEAD(&device->ccw_queue); 100 init_timer(&device->timer); 101 INIT_WORK(&device->kick_work, do_kick_device); 102 device->state = DASD_STATE_NEW; 103 device->target = DASD_STATE_NEW; 104 105 return device; 106} 107 108/* 109 * Free memory of a device structure. 110 */ |
113void 114dasd_free_device(struct dasd_device *device) | 111void dasd_free_device(struct dasd_device *device) |
115{ 116 kfree(device->private); 117 free_page((unsigned long) device->erp_mem); 118 free_pages((unsigned long) device->ccw_mem, 1); 119 kfree(device); 120} 121 122/* | 112{ 113 kfree(device->private); 114 free_page((unsigned long) device->erp_mem); 115 free_pages((unsigned long) device->ccw_mem, 1); 116 kfree(device); 117} 118 119/* |
120 * Allocate memory for a new block structure. 121 */ 122struct dasd_block *dasd_alloc_block(void) 123{ 124 struct dasd_block *block; 125 126 block = kzalloc(sizeof(*block), GFP_ATOMIC); 127 if (!block) 128 return ERR_PTR(-ENOMEM); 129 /* open_count = 0 means device online but not in use */ 130 atomic_set(&block->open_count, -1); 131 132 spin_lock_init(&block->request_queue_lock); 133 atomic_set(&block->tasklet_scheduled, 0); 134 tasklet_init(&block->tasklet, 135 (void (*)(unsigned long)) dasd_block_tasklet, 136 (unsigned long) block); 137 INIT_LIST_HEAD(&block->ccw_queue); 138 spin_lock_init(&block->queue_lock); 139 init_timer(&block->timer); 140 141 return block; 142} 143 144/* 145 * Free memory of a block structure. 146 */ 147void dasd_free_block(struct dasd_block *block) 148{ 149 kfree(block); 150} 151 152/*
|
123 * Make a new device known to the system. 124 */ | 153 * Make a new device known to the system. 154 */ |
125static int 126dasd_state_new_to_known(struct dasd_device *device) | 155static int dasd_state_new_to_known(struct dasd_device *device) |
127{ 128 int rc; 129 130 /* 131 * As long as the device is not in state DASD_STATE_NEW we want to 132 * keep the reference count > 0. 133 */ 134 dasd_get_device(device); 135 | 156{ 157 int rc; 158 159 /* 160 * As long as the device is not in state DASD_STATE_NEW we want to 161 * keep the reference count > 0. 162 */ 163 dasd_get_device(device); 164 |
136 rc = dasd_alloc_queue(device); 137 if (rc) { 138 dasd_put_device(device); 139 return rc; | 165 if (device->block) { 166 rc = dasd_alloc_queue(device->block); 167 if (rc) { 168 dasd_put_device(device); 169 return rc; 170 } |
140 } | 171 } |
141 | |
142 device->state = DASD_STATE_KNOWN; 143 return 0; 144} 145 146/* 147 * Let the system forget about a device. 148 */ | 172 device->state = DASD_STATE_KNOWN; 173 return 0; 174} 175 176/* 177 * Let the system forget about a device. 178 */ |
149static int 150dasd_state_known_to_new(struct dasd_device * device) | 179static int dasd_state_known_to_new(struct dasd_device *device) |
151{ 152 /* Disable extended error reporting for this device. */ 153 dasd_eer_disable(device); 154 /* Forget the discipline information. */ | 180{ 181 /* Disable extended error reporting for this device. */ 182 dasd_eer_disable(device); 183 /* Forget the discipline information. */ |
155 if (device->discipline) | 184 if (device->discipline) { 185 if (device->discipline->uncheck_device) 186 device->discipline->uncheck_device(device); |
156 module_put(device->discipline->owner); | 187 module_put(device->discipline->owner); |
188 } |
|
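        /*
         * The module reference dropped above pairs with a try_module_get()
         * taken on the online path (not part of this hunk); the new
         * uncheck_device() hook gives the discipline a chance to undo its
         * per-device setup before that reference goes away.
         */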
157 device->discipline = NULL; 158 if (device->base_discipline) 159 module_put(device->base_discipline->owner); 160 device->base_discipline = NULL; 161 device->state = DASD_STATE_NEW; 162 | 189 device->discipline = NULL; 190 if (device->base_discipline) 191 module_put(device->base_discipline->owner); 192 device->base_discipline = NULL; 193 device->state = DASD_STATE_NEW; 194 |
163 dasd_free_queue(device); | 195 if (device->block) 196 dasd_free_queue(device->block); |
164 165 /* Give up reference we took in dasd_state_new_to_known. */ 166 dasd_put_device(device); 167 return 0; 168} 169 170/* 171 * Request the irq line for the device. 172 */ | 197 198 /* Give up reference we took in dasd_state_new_to_known. */ 199 dasd_put_device(device); 200 return 0; 201} 202 203/* 204 * Request the irq line for the device. 205 */ |
173static int 174dasd_state_known_to_basic(struct dasd_device * device) | 206static int dasd_state_known_to_basic(struct dasd_device *device) |
175{ 176 int rc; 177 178 /* Allocate and register gendisk structure. */ | 207{ 208 int rc; 209 210 /* Allocate and register gendisk structure. */ |
179 rc = dasd_gendisk_alloc(device); 180 if (rc) 181 return rc; 182 | 211 if (device->block) { 212 rc = dasd_gendisk_alloc(device->block); 213 if (rc) 214 return rc; 215 } |
183 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, | 216 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 217 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, |
185 8 * sizeof (long)); | 218 8 * sizeof(long)); |
186 debug_register_view(device->debug_area, &debug_sprintf_view); 187 debug_set_level(device->debug_area, DBF_WARNING); 188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 189 190 device->state = DASD_STATE_BASIC; 191 return 0; 192} 193 194/* 195 * Release the irq line for the device. Terminate any running i/o. 196 */ | 219 debug_register_view(device->debug_area, &debug_sprintf_view); 220 debug_set_level(device->debug_area, DBF_WARNING); 221 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 222 223 device->state = DASD_STATE_BASIC; 224 return 0; 225} 226 227/* 228 * Release the irq line for the device. Terminate any running i/o. 229 */ |
197static int 198dasd_state_basic_to_known(struct dasd_device * device) | 230static int dasd_state_basic_to_known(struct dasd_device *device) |
199{ 200 int rc; | 231{ 232 int rc; |
201 202 dasd_gendisk_free(device); 203 rc = dasd_flush_ccw_queue(device, 1); | 233 if (device->block) { 234 dasd_gendisk_free(device->block); 235 dasd_block_clear_timer(device->block); 236 } 237 rc = dasd_flush_device_queue(device); |
204 if (rc) 205 return rc; | 238 if (rc) 239 return rc; |
206 dasd_clear_timer(device); | 240 dasd_device_clear_timer(device); |
207 208 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 209 if (device->debug_area != NULL) { 210 debug_unregister(device->debug_area); 211 device->debug_area = NULL; 212 } 213 device->state = DASD_STATE_KNOWN; 214 return 0; --- 8 unchanged lines hidden (view full) --- 223 * interrupt for this detection ccw uses the kernel event daemon to 224 * trigger the call to dasd_change_state. All this is done in the 225 * discipline code, see dasd_eckd.c. 226 * After the analysis ccw is done (do_analysis returned 0) the block 227 * device is set up. 228 * In case the analysis returns an error, the device setup is stopped 229 * (a fake disk was already added to allow formatting). 230 */ | 241 242 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 243 if (device->debug_area != NULL) { 244 debug_unregister(device->debug_area); 245 device->debug_area = NULL; 246 } 247 device->state = DASD_STATE_KNOWN; 248 return 0; --- 8 unchanged lines hidden (view full) --- 257 * interrupt for this detection ccw uses the kernel event daemon to 258 * trigger the call to dasd_change_state. All this is done in the 259 * discipline code, see dasd_eckd.c. 260 * After the analysis ccw is done (do_analysis returned 0) the block 261 * device is set up. 262 * In case the analysis returns an error, the device setup is stopped 263 * (a fake disk was already added to allow formatting). 264 */
231static int 232dasd_state_basic_to_ready(struct dasd_device * device) | 265static int dasd_state_basic_to_ready(struct dasd_device *device) |
233{ 234 int rc; | 266{ 267 int rc; |
268 struct dasd_block *block; |
|
235 236 rc = 0; | 269 270 rc = 0; |
237 if (device->discipline->do_analysis != NULL) 238 rc = device->discipline->do_analysis(device); 239 if (rc) { 240 if (rc != -EAGAIN) 241 device->state = DASD_STATE_UNFMT; 242 return rc; 243 } | 271 block = device->block; |
244 /* make disk known with correct capacity */ | 272 /* make disk known with correct capacity */ |
245 dasd_setup_queue(device); 246 set_capacity(device->gdp, device->blocks << device->s2b_shift); 247 device->state = DASD_STATE_READY; 248 rc = dasd_scan_partitions(device); 249 if (rc) 250 device->state = DASD_STATE_BASIC; | 273 if (block) { 274 if (block->base->discipline->do_analysis != NULL) 275 rc = block->base->discipline->do_analysis(block); 276 if (rc) { 277 if (rc != -EAGAIN) 278 device->state = DASD_STATE_UNFMT; 279 return rc; 280 } 281 dasd_setup_queue(block); 282 set_capacity(block->gdp, 283 block->blocks << block->s2b_shift); 284 device->state = DASD_STATE_READY; 285 rc = dasd_scan_partitions(block); 286 if (rc) 287 device->state = DASD_STATE_BASIC; 288 } else { 289 device->state = DASD_STATE_READY; 290 } |
251 return rc; 252} 253 254/* 255 * Remove device from block device layer. Destroy dirty buffers. 256 * Forget format information. Check if the target level is basic 257 * and if it is create fake disk for formatting. 258 */ | 291 return rc; 292} 293 294/* 295 * Remove device from block device layer. Destroy dirty buffers. 296 * Forget format information. Check if the target level is basic 297 * and if it is create fake disk for formatting. 298 */ |
259static int 260dasd_state_ready_to_basic(struct dasd_device * device) | 299static int dasd_state_ready_to_basic(struct dasd_device *device) |
261{ 262 int rc; 263 | 300{ 301 int rc; 302 |
264 rc = dasd_flush_ccw_queue(device, 0); 265 if (rc) 266 return rc; 267 dasd_destroy_partitions(device); 268 dasd_flush_request_queue(device); 269 device->blocks = 0; 270 device->bp_block = 0; 271 device->s2b_shift = 0; | |
272 device->state = DASD_STATE_BASIC; | 303 device->state = DASD_STATE_BASIC; |
304 if (device->block) { 305 struct dasd_block *block = device->block; 306 rc = dasd_flush_block_queue(block); 307 if (rc) { 308 device->state = DASD_STATE_READY; 309 return rc; 310 } 311 dasd_destroy_partitions(block); 312 dasd_flush_request_queue(block); 313 block->blocks = 0; 314 block->bp_block = 0; 315 block->s2b_shift = 0; 316 } |
|
273 return 0; 274} 275 276/* 277 * Back to basic. 278 */ | 317 return 0; 318} 319 320/* 321 * Back to basic. 322 */ |
279static int 280dasd_state_unfmt_to_basic(struct dasd_device * device) | 323static int dasd_state_unfmt_to_basic(struct dasd_device *device) |
281{ 282 device->state = DASD_STATE_BASIC; 283 return 0; 284} 285 286/* 287 * Make the device online and schedule the bottom half to start 288 * the requeueing of requests from the linux request queue to the 289 * ccw queue. 290 */ 291static int 292dasd_state_ready_to_online(struct dasd_device * device) 293{ | 324{ 325 device->state = DASD_STATE_BASIC; 326 return 0; 327} 328 329/* 330 * Make the device online and schedule the bottom half to start 331 * the requeueing of requests from the linux request queue to the 332 * ccw queue. 333 */ 334static int 335dasd_state_ready_to_online(struct dasd_device * device) 336{ |
337 int rc; 338 339 if (device->discipline->ready_to_online) { 340 rc = device->discipline->ready_to_online(device); 341 if (rc) 342 return rc; 343 } |
|
294 device->state = DASD_STATE_ONLINE; | 344 device->state = DASD_STATE_ONLINE; |
295 dasd_schedule_bh(device); | 345 if (device->block) 346 dasd_schedule_block_bh(device->block); |
296 return 0; 297} 298 299/* 300 * Stop the requeueing of requests again. 301 */ | 347 return 0; 348} 349 350/* 351 * Stop the requeueing of requests again. 352 */ |
302static int 303dasd_state_online_to_ready(struct dasd_device * device) | 353static int dasd_state_online_to_ready(struct dasd_device *device) |
304{ | 354{ |
355 int rc; 356 357 if (device->discipline->online_to_ready) { 358 rc = device->discipline->online_to_ready(device); 359 if (rc) 360 return rc; 361 } |
|
305 device->state = DASD_STATE_READY; 306 return 0; 307} 308 309/* 310 * Device startup state changes. 311 */ | 362 device->state = DASD_STATE_READY; 363 return 0; 364} 365 366/* 367 * Device startup state changes. 368 */ |
312static int 313dasd_increase_state(struct dasd_device *device) | 369static int dasd_increase_state(struct dasd_device *device) |
314{ 315 int rc; 316 317 rc = 0; 318 if (device->state == DASD_STATE_NEW && 319 device->target >= DASD_STATE_KNOWN) 320 rc = dasd_state_new_to_known(device); 321 --- 18 unchanged lines hidden (view full) --- 340 rc = dasd_state_ready_to_online(device); 341 342 return rc; 343} 344 345/* 346 * Device shutdown state changes. 347 */ | 370{ 371 int rc; 372 373 rc = 0; 374 if (device->state == DASD_STATE_NEW && 375 device->target >= DASD_STATE_KNOWN) 376 rc = dasd_state_new_to_known(device); 377 --- 18 unchanged lines hidden (view full) --- 396 rc = dasd_state_ready_to_online(device); 397 398 return rc; 399} 400 401/* 402 * Device shutdown state changes. 403 */ |
348static int 349dasd_decrease_state(struct dasd_device *device) | 404static int dasd_decrease_state(struct dasd_device *device) |
350{ 351 int rc; 352 353 rc = 0; 354 if (device->state == DASD_STATE_ONLINE && 355 device->target <= DASD_STATE_READY) 356 rc = dasd_state_online_to_ready(device); 357 --- 18 unchanged lines hidden (view full) --- 376 rc = dasd_state_known_to_new(device); 377 378 return rc; 379} 380 381/* 382 * This is the main startup/shutdown routine. 383 */ | 405{ 406 int rc; 407 408 rc = 0; 409 if (device->state == DASD_STATE_ONLINE && 410 device->target <= DASD_STATE_READY) 411 rc = dasd_state_online_to_ready(device); 412 --- 18 unchanged lines hidden (view full) --- 431 rc = dasd_state_known_to_new(device); 432 433 return rc; 434} 435 436/* 437 * This is the main startup/shutdown routine. 438 */ |
384static void 385dasd_change_state(struct dasd_device *device) | 439static void dasd_change_state(struct dasd_device *device) |
386{ 387 int rc; 388 389 if (device->state == device->target) 390 /* Already where we want to go today... */ 391 return; 392 if (device->state < device->target) 393 rc = dasd_increase_state(device); --- 10 unchanged lines hidden (view full) --- 404} 405 406/* 407 * Kick starter for devices that did not complete the startup/shutdown 408 * procedure or were sleeping because of a pending state. 409 * dasd_kick_device will schedule a call to do_kick_device to the kernel 410 * event daemon. 411 */ | 440{ 441 int rc; 442 443 if (device->state == device->target) 444 /* Already where we want to go today... */ 445 return; 446 if (device->state < device->target) 447 rc = dasd_increase_state(device); --- 10 unchanged lines hidden (view full) --- 458} 459 460/* 461 * Kick starter for devices that did not complete the startup/shutdown 462 * procedure or were sleeping because of a pending state. 463 * dasd_kick_device will schedule a call to do_kick_device to the kernel 464 * event daemon. 465 */
412static void 413do_kick_device(struct work_struct *work) | 466static void do_kick_device(struct work_struct *work) |
414{ 415 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 416 dasd_change_state(device); | 467{ 468 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 469 dasd_change_state(device); |
417 dasd_schedule_bh(device); | 470 dasd_schedule_device_bh(device); |
418 dasd_put_device(device); 419} 420 | 471 dasd_put_device(device); 472} 473 |
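The reference pairing around kick_work is easy to miss: dasd_kick_device() (next hunk) pins the device before queueing the work, and do_kick_device() above drops that pin as its last action, so the device cannot be freed while the work item is in flight. The round trip in isolation, using only calls that appear in this file:

        dasd_get_device(device);           /* in dasd_kick_device(): pin    */
        schedule_work(&device->kick_work); /* keventd runs do_kick_device() */

        /* ...and at the end of do_kick_device(): */
        dasd_change_state(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);           /* release the pin taken above   */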
421void 422dasd_kick_device(struct dasd_device *device) | 474void dasd_kick_device(struct dasd_device *device) |
423{ 424 dasd_get_device(device); 425 /* queue call to do_kick_device to the kernel event daemon. */ 426 schedule_work(&device->kick_work); 427} 428 429/* 430 * Set the target state for a device and start the state change. 431 */ | 475{ 476 dasd_get_device(device); 477 /* queue call to do_kick_device to the kernel event daemon. */ 478 schedule_work(&device->kick_work); 479} 480 481/* 482 * Set the target state for a device and start the state change. 483 */
432void 433dasd_set_target_state(struct dasd_device *device, int target) | 484void dasd_set_target_state(struct dasd_device *device, int target) |
434{ 435 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 436 if (dasd_probeonly && target > DASD_STATE_READY) 437 target = DASD_STATE_READY; 438 if (device->target != target) { 439 if (device->state == target) 440 wake_up(&dasd_init_waitq); 441 device->target = target; 442 } 443 if (device->state != device->target) 444 dasd_change_state(device); 445} 446 447/* 448 * Enable devices with device numbers in [from..to]. 449 */ | 485{ 486 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 487 if (dasd_probeonly && target > DASD_STATE_READY) 488 target = DASD_STATE_READY; 489 if (device->target != target) { 490 if (device->state == target) 491 wake_up(&dasd_init_waitq); 492 device->target = target; 493 } 494 if (device->state != device->target) 495 dasd_change_state(device); 496} 497 498/* 499 * Enable devices with device numbers in [from..to]. 500 */ |
450static inline int 451_wait_for_device(struct dasd_device *device) | 501static inline int _wait_for_device(struct dasd_device *device) |
452{ 453 return (device->state == device->target); 454} 455 | 502{ 503 return (device->state == device->target); 504} 505 |
456void 457dasd_enable_device(struct dasd_device *device) | 506void dasd_enable_device(struct dasd_device *device) |
458{ 459 dasd_set_target_state(device, DASD_STATE_ONLINE); 460 if (device->state <= DASD_STATE_KNOWN) 461 /* No discipline for device found. */ 462 dasd_set_target_state(device, DASD_STATE_NEW); 463 /* Now wait for the devices to come up. */ 464 wait_event(dasd_init_waitq, _wait_for_device(device)); 465} --- 4 unchanged lines hidden (view full) --- 470#ifdef CONFIG_DASD_PROFILE 471 472struct dasd_profile_info_t dasd_global_profile; 473unsigned int dasd_profile_level = DASD_PROFILE_OFF; 474 475/* 476 * Increments counter in global and local profiling structures. 477 */ | 507{ 508 dasd_set_target_state(device, DASD_STATE_ONLINE); 509 if (device->state <= DASD_STATE_KNOWN) 510 /* No discipline for device found. */ 511 dasd_set_target_state(device, DASD_STATE_NEW); 512 /* Now wait for the devices to come up. */ 513 wait_event(dasd_init_waitq, _wait_for_device(device)); 514} --- 4 unchanged lines hidden (view full) --- 519#ifdef CONFIG_DASD_PROFILE 520 521struct dasd_profile_info_t dasd_global_profile; 522unsigned int dasd_profile_level = DASD_PROFILE_OFF; 523 524/* 525 * Increments counter in global and local profiling structures. 526 */ |
478#define dasd_profile_counter(value, counter, device) \ | 527#define dasd_profile_counter(value, counter, block) \ |
479{ \ 480 int index; \ 481 for (index = 0; index < 31 && value >> (2+index); index++); \ 482 dasd_global_profile.counter[index]++; \ | 528{ \ 529 int index; \ 530 for (index = 0; index < 31 && value >> (2+index); index++); \ 531 dasd_global_profile.counter[index]++; \ |
483 device->profile.counter[index]++; \ | 532 block->profile.counter[index]++; \ |
484} 485 486/* 487 * Add profiling information for cqr before execution. 488 */ | 533} 534 535/* 536 * Add profiling information for cqr before execution. 537 */ |
489static void 490dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 491 struct request *req) | 538static void dasd_profile_start(struct dasd_block *block, 539 struct dasd_ccw_req *cqr, 540 struct request *req) |
492{ 493 struct list_head *l; 494 unsigned int counter; 495 496 if (dasd_profile_level != DASD_PROFILE_ON) 497 return; 498 499 /* count the length of the chanq for statistics */ 500 counter = 0; | 541{ 542 struct list_head *l; 543 unsigned int counter; 544 545 if (dasd_profile_level != DASD_PROFILE_ON) 546 return; 547 548 /* count the length of the chanq for statistics */ 549 counter = 0; |
501 list_for_each(l, &device->ccw_queue) | 550 list_for_each(l, &block->ccw_queue) |
502 if (++counter >= 31) 503 break; 504 dasd_global_profile.dasd_io_nr_req[counter]++; | 551 if (++counter >= 31) 552 break; 553 dasd_global_profile.dasd_io_nr_req[counter]++; |
505 device->profile.dasd_io_nr_req[counter]++; | 554 block->profile.dasd_io_nr_req[counter]++; |
506} 507 508/* 509 * Add profiling information for cqr after execution. 510 */ | 555} 556 557/* 558 * Add profiling information for cqr after execution. 559 */ |
511static void 512dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 513 struct request *req) | 560static void dasd_profile_end(struct dasd_block *block, 561 struct dasd_ccw_req *cqr, 562 struct request *req) |
514{ 515 long strtime, irqtime, endtime, tottime; /* in microseconds */ 516 long tottimeps, sectors; 517 518 if (dasd_profile_level != DASD_PROFILE_ON) 519 return; 520 521 sectors = req->nr_sectors; --- 5 unchanged lines hidden (view full) --- 527 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 528 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 529 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 530 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 531 tottimeps = tottime / sectors; 532 533 if (!dasd_global_profile.dasd_io_reqs) 534 memset(&dasd_global_profile, 0, | 563{ 564 long strtime, irqtime, endtime, tottime; /* in microseconds */ 565 long tottimeps, sectors; 566 567 if (dasd_profile_level != DASD_PROFILE_ON) 568 return; 569 570 sectors = req->nr_sectors; --- 5 unchanged lines hidden (view full) --- 576 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 577 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 578 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 579 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 580 tottimeps = tottime / sectors; 581 582 if (!dasd_global_profile.dasd_io_reqs) 583 memset(&dasd_global_profile, 0, |
535 sizeof (struct dasd_profile_info_t)); | 584 sizeof(struct dasd_profile_info_t)); |
536 dasd_global_profile.dasd_io_reqs++; 537 dasd_global_profile.dasd_io_sects += sectors; 538 | 585 dasd_global_profile.dasd_io_reqs++; 586 dasd_global_profile.dasd_io_sects += sectors; 587 |
539 if (!device->profile.dasd_io_reqs) 540 memset(&device->profile, 0, 541 sizeof (struct dasd_profile_info_t)); 542 device->profile.dasd_io_reqs++; 543 device->profile.dasd_io_sects += sectors; | 588 if (!block->profile.dasd_io_reqs) 589 memset(&block->profile, 0, 590 sizeof(struct dasd_profile_info_t)); 591 block->profile.dasd_io_reqs++; 592 block->profile.dasd_io_sects += sectors; |
544 | 593 |
545 dasd_profile_counter(sectors, dasd_io_secs, device); 546 dasd_profile_counter(tottime, dasd_io_times, device); 547 dasd_profile_counter(tottimeps, dasd_io_timps, device); 548 dasd_profile_counter(strtime, dasd_io_time1, device); 549 dasd_profile_counter(irqtime, dasd_io_time2, device); 550 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device); 551 dasd_profile_counter(endtime, dasd_io_time3, device); | 594 dasd_profile_counter(sectors, dasd_io_secs, block); 595 dasd_profile_counter(tottime, dasd_io_times, block); 596 dasd_profile_counter(tottimeps, dasd_io_timps, block); 597 dasd_profile_counter(strtime, dasd_io_time1, block); 598 dasd_profile_counter(irqtime, dasd_io_time2, block); 599 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); 600 dasd_profile_counter(endtime, dasd_io_time3, block); |
552} 553#else | 601} 602#else |
554#define dasd_profile_start(device, cqr, req) do {} while (0) 555#define dasd_profile_end(device, cqr, req) do {} while (0) | 603#define dasd_profile_start(block, cqr, req) do {} while (0) 604#define dasd_profile_end(block, cqr, req) do {} while (0) |
556#endif /* CONFIG_DASD_PROFILE */ 557 558/* 559 * Allocate memory for a channel program with 'cplength' channel 560 * command words and 'datasize' additional space. There are two 561 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed 562 * memory and 2) dasd_smalloc_request uses the static ccw memory 563 * that gets allocated for each device. 564 */ | 605#endif /* CONFIG_DASD_PROFILE */ 606 607/* 608 * Allocate memory for a channel program with 'cplength' channel 609 * command words and 'datasize' additional space. There are two 610 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed 611 * memory and 2) dasd_smalloc_request uses the static ccw memory 612 * that gets allocated for each device. 613 */
565struct dasd_ccw_req * 566dasd_kmalloc_request(char *magic, int cplength, int datasize, 567 struct dasd_device * device) | 614struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, 615 int datasize, 616 struct dasd_device *device) |
568{ 569 struct dasd_ccw_req *cqr; 570 571 /* Sanity checks */ 572 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 573 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 574 575 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); --- 19 unchanged lines hidden (view full) --- 595 } 596 strncpy((char *) &cqr->magic, magic, 4); 597 ASCEBC((char *) &cqr->magic, 4); 598 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 599 dasd_get_device(device); 600 return cqr; 601} 602 | 617{ 618 struct dasd_ccw_req *cqr; 619 620 /* Sanity checks */ 621 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 622 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 623 624 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); --- 19 unchanged lines hidden (view full) --- 644 } 645 strncpy((char *) &cqr->magic, magic, 4); 646 ASCEBC((char *) &cqr->magic, 4); 647 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 648 dasd_get_device(device); 649 return cqr; 650} 651 |
603struct dasd_ccw_req * 604dasd_smalloc_request(char *magic, int cplength, int datasize, 605 struct dasd_device * device) | 652struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, 653 int datasize, 654 struct dasd_device *device) |
606{ 607 unsigned long flags; 608 struct dasd_ccw_req *cqr; 609 char *data; 610 int size; 611 612 /* Sanity checks */ 613 BUG_ON( magic == NULL || datasize > PAGE_SIZE || --- 30 unchanged lines hidden (view full) --- 644 return cqr; 645} 646 647/* 648 * Free memory of a channel program. This function needs to free all the 649 * idal lists that might have been created by dasd_set_cda and the 650 * struct dasd_ccw_req itself. 651 */ | 655{ 656 unsigned long flags; 657 struct dasd_ccw_req *cqr; 658 char *data; 659 int size; 660 661 /* Sanity checks */ 662 BUG_ON( magic == NULL || datasize > PAGE_SIZE || --- 30 unchanged lines hidden (view full) --- 693 return cqr; 694} 695 696/* 697 * Free memory of a channel program. This function needs to free all the 698 * idal lists that might have been created by dasd_set_cda and the 699 * struct dasd_ccw_req itself. 700 */ |
652void 653dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | 701void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
654{ 655#ifdef CONFIG_64BIT 656 struct ccw1 *ccw; 657 658 /* Clear any idals used for the request. */ 659 ccw = cqr->cpaddr; 660 do { 661 clear_normalized_cda(ccw); 662 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 663#endif 664 kfree(cqr->cpaddr); 665 kfree(cqr->data); 666 kfree(cqr); 667 dasd_put_device(device); 668} 669 | 702{ 703#ifdef CONFIG_64BIT 704 struct ccw1 *ccw; 705 706 /* Clear any idals used for the request. */ 707 ccw = cqr->cpaddr; 708 do { 709 clear_normalized_cda(ccw); 710 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 711#endif 712 kfree(cqr->cpaddr); 713 kfree(cqr->data); 714 kfree(cqr); 715 dasd_put_device(device); 716} 717 |
670void 671dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | 718void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
672{ 673 unsigned long flags; 674 675 spin_lock_irqsave(&device->mem_lock, flags); 676 dasd_free_chunk(&device->ccw_chunks, cqr); 677 spin_unlock_irqrestore(&device->mem_lock, flags); 678 dasd_put_device(device); 679} 680 681/* 682 * Check discipline magic in cqr. 683 */ | 719{ 720 unsigned long flags; 721 722 spin_lock_irqsave(&device->mem_lock, flags); 723 dasd_free_chunk(&device->ccw_chunks, cqr); 724 spin_unlock_irqrestore(&device->mem_lock, flags); 725 dasd_put_device(device); 726} 727 728/* 729 * Check discipline magic in cqr. 730 */ |
684static inline int 685dasd_check_cqr(struct dasd_ccw_req *cqr) | 731static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) |
686{ 687 struct dasd_device *device; 688 689 if (cqr == NULL) 690 return -EINVAL; | 732{ 733 struct dasd_device *device; 734 735 if (cqr == NULL) 736 return -EINVAL; |
691 device = cqr->device; | 737 device = cqr->startdev; |
692 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 693 DEV_MESSAGE(KERN_WARNING, device, 694 " dasd_ccw_req 0x%08x magic doesn't match" 695 " discipline 0x%08x", 696 cqr->magic, 697 *(unsigned int *) device->discipline->name); 698 return -EINVAL; 699 } 700 return 0; 701} 702 703/* 704 * Terminate the current i/o and set the request to clear_pending. 705 * Timer keeps device running. 706 * ccw_device_clear can fail if the i/o subsystem 707 * is in a bad mood. 708 */ | 738 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 739 DEV_MESSAGE(KERN_WARNING, device, 740 " dasd_ccw_req 0x%08x magic doesn't match" 741 " discipline 0x%08x", 742 cqr->magic, 743 *(unsigned int *) device->discipline->name); 744 return -EINVAL; 745 } 746 return 0; 747} 748 749/* 750 * Terminate the current i/o and set the request to clear_pending. 751 * Timer keeps device running. 752 * ccw_device_clear can fail if the i/o subsystem 753 * is in a bad mood. 754 */
709int 710dasd_term_IO(struct dasd_ccw_req * cqr) | 755int dasd_term_IO(struct dasd_ccw_req *cqr) |
711{ 712 struct dasd_device *device; 713 int retries, rc; 714 715 /* Check the cqr */ 716 rc = dasd_check_cqr(cqr); 717 if (rc) 718 return rc; 719 retries = 0; | 756{ 757 struct dasd_device *device; 758 int retries, rc; 759 760 /* Check the cqr */ 761 rc = dasd_check_cqr(cqr); 762 if (rc) 763 return rc; 764 retries = 0; |
720 device = (struct dasd_device *) cqr->device; | 765 device = (struct dasd_device *) cqr->startdev; |
721 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 722 rc = ccw_device_clear(device->cdev, (long) cqr); 723 switch (rc) { 724 case 0: /* termination successful */ 725 cqr->retries--; | 766 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 767 rc = ccw_device_clear(device->cdev, (long) cqr); 768 switch (rc) { 769 case 0: /* termination successful */ 770 cqr->retries--; |
726 cqr->status = DASD_CQR_CLEAR; | 771 cqr->status = DASD_CQR_CLEAR_PENDING; |
727 cqr->stopclk = get_clock(); 728 cqr->starttime = 0; 729 DBF_DEV_EVENT(DBF_DEBUG, device, 730 "terminate cqr %p successful", 731 cqr); 732 break; 733 case -ENODEV: 734 DBF_DEV_EVENT(DBF_ERR, device, "%s", --- 13 unchanged lines hidden (view full) --- 748 "line %d unknown RC=%d, please " 749 "report to linux390@de.ibm.com", 750 __LINE__, rc); 751 BUG(); 752 break; 753 } 754 retries++; 755 } | 772 cqr->stopclk = get_clock(); 773 cqr->starttime = 0; 774 DBF_DEV_EVENT(DBF_DEBUG, device, 775 "terminate cqr %p successful", 776 cqr); 777 break; 778 case -ENODEV: 779 DBF_DEV_EVENT(DBF_ERR, device, "%s", --- 13 unchanged lines hidden (view full) --- 793 "line %d unknown RC=%d, please " 794 "report to linux390@de.ibm.com", 795 __LINE__, rc); 796 BUG(); 797 break; 798 } 799 retries++; 800 } |
756 dasd_schedule_bh(device); | 801 dasd_schedule_device_bh(device); |
757 return rc; 758} 759 760/* 761 * Start the i/o. This start_IO can fail if the channel is really busy. 762 * In that case set up a timer to start the request later. 763 */ | 802 return rc; 803} 804 805/* 806 * Start the i/o. This start_IO can fail if the channel is really busy. 807 * In that case set up a timer to start the request later. 808 */ |
764int 765dasd_start_IO(struct dasd_ccw_req * cqr) | 809int dasd_start_IO(struct dasd_ccw_req *cqr) |
766{ 767 struct dasd_device *device; 768 int rc; 769 770 /* Check the cqr */ 771 rc = dasd_check_cqr(cqr); 772 if (rc) 773 return rc; | 810{ 811 struct dasd_device *device; 812 int rc; 813 814 /* Check the cqr */ 815 rc = dasd_check_cqr(cqr); 816 if (rc) 817 return rc; |
774 device = (struct dasd_device *) cqr->device; | 818 device = (struct dasd_device *) cqr->startdev; |
775 if (cqr->retries < 0) { 776 DEV_MESSAGE(KERN_DEBUG, device, 777 "start_IO: request %p (%02x/%i) - no retry left.", 778 cqr, cqr->status, cqr->retries); | 819 if (cqr->retries < 0) { 820 DEV_MESSAGE(KERN_DEBUG, device, 821 "start_IO: request %p (%02x/%i) - no retry left.", 822 cqr, cqr->status, cqr->retries); |
779 cqr->status = DASD_CQR_FAILED; | 823 cqr->status = DASD_CQR_ERROR; |
780 return -EIO; 781 } 782 cqr->startclk = get_clock(); 783 cqr->starttime = jiffies; 784 cqr->retries--; 785 rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr, 786 cqr->lpm, 0); 787 switch (rc) { --- 40 unchanged lines hidden (view full) --- 828/* 829 * Timeout function for dasd devices. This is used for different purposes 830 * 1) missing interrupt handler for normal operation 831 * 2) delayed start of request where start_IO failed with -EBUSY 832 * 3) timeout for missing state change interrupts 833 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 834 * DASD_CQR_QUEUED for 2) and 3). 835 */ | 824 return -EIO; 825 } 826 cqr->startclk = get_clock(); 827 cqr->starttime = jiffies; 828 cqr->retries--; 829 rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr, 830 cqr->lpm, 0); 831 switch (rc) { --- 40 unchanged lines hidden (view full) --- 872/* 873 * Timeout function for dasd devices. This is used for different purposes 874 * 1) missing interrupt handler for normal operation 875 * 2) delayed start of request where start_IO failed with -EBUSY 876 * 3) timeout for missing state change interrupts 877 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 878 * DASD_CQR_QUEUED for 2) and 3). 879 */ |
836static void 837dasd_timeout_device(unsigned long ptr) | 880static void dasd_device_timeout(unsigned long ptr) |
838{ 839 unsigned long flags; 840 struct dasd_device *device; 841 842 device = (struct dasd_device *) ptr; 843 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 844 /* re-activate request queue */ 845 device->stopped &= ~DASD_STOPPED_PENDING; 846 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 881{ 882 unsigned long flags; 883 struct dasd_device *device; 884 885 device = (struct dasd_device *) ptr; 886 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 887 /* re-activate request queue */ 888 device->stopped &= ~DASD_STOPPED_PENDING; 889 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
847 dasd_schedule_bh(device); | 890 dasd_schedule_device_bh(device); |
848} 849 850/* 851 * Setup timeout for a device in jiffies. 852 */ | 891} 892 893/* 894 * Setup timeout for a device in jiffies. 895 */ |
853void 854dasd_set_timer(struct dasd_device *device, int expires) | 896void dasd_device_set_timer(struct dasd_device *device, int expires) |
855{ 856 if (expires == 0) { 857 if (timer_pending(&device->timer)) 858 del_timer(&device->timer); 859 return; 860 } 861 if (timer_pending(&device->timer)) { 862 if (mod_timer(&device->timer, jiffies + expires)) 863 return; 864 } | 897{ 898 if (expires == 0) { 899 if (timer_pending(&device->timer)) 900 del_timer(&device->timer); 901 return; 902 } 903 if (timer_pending(&device->timer)) { 904 if (mod_timer(&device->timer, jiffies + expires)) 905 return; 906 } |
865 device->timer.function = dasd_timeout_device; | 907 device->timer.function = dasd_device_timeout; |
866 device->timer.data = (unsigned long) device; 867 device->timer.expires = jiffies + expires; 868 add_timer(&device->timer); 869} 870 871/* 872 * Clear timeout for a device. 873 */ | 908 device->timer.data = (unsigned long) device; 909 device->timer.expires = jiffies + expires; 910 add_timer(&device->timer); 911} 912 913/* 914 * Clear timeout for a device. 915 */ |
874void 875dasd_clear_timer(struct dasd_device *device) | 916void dasd_device_clear_timer(struct dasd_device *device) |
876{ 877 if (timer_pending(&device->timer)) 878 del_timer(&device->timer); 879} 880 | 917{ 918 if (timer_pending(&device->timer)) 919 del_timer(&device->timer); 920} 921 |
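dasd_device_set_timer() (renamed from dasd_set_timer) wraps the usual timer dance: expires == 0 cancels a pending timer, an already-pending timer is pushed out with mod_timer(), and otherwise function/data/expires are filled in and add_timer() arms it. Illustrative call sites, with values taken from elsewhere in this file (expires is in jiffies):

        dasd_device_set_timer(device, next->expires); /* guard against a missing
                                                       * interrupt after start_IO() */
        dasd_device_set_timer(device, HZ/2);          /* retry in 0.5s, as the old
                                                       * build_cp -EAGAIN path did  */
        dasd_device_clear_timer(device);              /* interrupt arrived, cancel  */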
881static void 882dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) | 922static void dasd_handle_killed_request(struct ccw_device *cdev, 923 unsigned long intparm) |
883{ 884 struct dasd_ccw_req *cqr; 885 struct dasd_device *device; 886 887 cqr = (struct dasd_ccw_req *) intparm; 888 if (cqr->status != DASD_CQR_IN_IO) { 889 MESSAGE(KERN_DEBUG, 890 "invalid status in handle_killed_request: " 891 "bus_id %s, status %02x", 892 cdev->dev.bus_id, cqr->status); 893 return; 894 } 895 | 924{ 925 struct dasd_ccw_req *cqr; 926 struct dasd_device *device; 927 928 cqr = (struct dasd_ccw_req *) intparm; 929 if (cqr->status != DASD_CQR_IN_IO) { 930 MESSAGE(KERN_DEBUG, 931 "invalid status in handle_killed_request: " 932 "bus_id %s, status %02x", 933 cdev->dev.bus_id, cqr->status); 934 return; 935 } 936 |
896 device = (struct dasd_device *) cqr->device; | 937 device = (struct dasd_device *) cqr->startdev; |
897 if (device == NULL || 898 device != dasd_device_from_cdev_locked(cdev) || 899 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 900 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 901 cdev->dev.bus_id); 902 return; 903 } 904 905 /* Schedule request to be retried. */ 906 cqr->status = DASD_CQR_QUEUED; 907 | 938 if (device == NULL || 939 device != dasd_device_from_cdev_locked(cdev) || 940 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 941 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 942 cdev->dev.bus_id); 943 return; 944 } 945 946 /* Schedule request to be retried. */ 947 cqr->status = DASD_CQR_QUEUED; 948 |
908 dasd_clear_timer(device); 909 dasd_schedule_bh(device); | 949 dasd_device_clear_timer(device); 950 dasd_schedule_device_bh(device); |
910 dasd_put_device(device); 911} 912 | 951 dasd_put_device(device); 952} 953 |
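The next hunk turns the static dasd_handle_state_change_pending() into the exported dasd_generic_handle_state_change(): instead of walking the device ccw queue and requeueing DASD_CQR_IN_IO requests itself, it now just clears DASD_STOPPED_PENDING and kicks both bottom halves, which do the requeueing. A discipline could invoke it from its unsolicited-interrupt hook roughly as below; this is a sketch reusing the status mask from the old interrupt handler, and the real wiring in dasd_eckd.c may differ:

        static void example_handle_unsolicited_interrupt(struct dasd_device *device,
                                                         struct irb *irb)
        {
                char mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END |
                            DEV_STAT_UNIT_EXCEP;

                /* state change pending? let the generic code restart the I/O */
                if ((irb->scsw.dstat & mask) == mask)
                        dasd_generic_handle_state_change(device);
        }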
913static void 914dasd_handle_state_change_pending(struct dasd_device *device) | 954void dasd_generic_handle_state_change(struct dasd_device *device) |
915{ | 955{ |
916 struct dasd_ccw_req *cqr; 917 struct list_head *l, *n; 918 | |
919 /* First of all start sense subsystem status request. */ 920 dasd_eer_snss(device); 921 922 device->stopped &= ~DASD_STOPPED_PENDING; | 956 /* First of all start sense subsystem status request. */ 957 dasd_eer_snss(device); 958 959 device->stopped &= ~DASD_STOPPED_PENDING; |
923 924 /* restart all 'running' IO on queue */ 925 list_for_each_safe(l, n, &device->ccw_queue) { 926 cqr = list_entry(l, struct dasd_ccw_req, list); 927 if (cqr->status == DASD_CQR_IN_IO) { 928 cqr->status = DASD_CQR_QUEUED; 929 } 930 } 931 dasd_clear_timer(device); 932 dasd_schedule_bh(device); | 960 dasd_schedule_device_bh(device); 961 if (device->block) 962 dasd_schedule_block_bh(device->block); |
933} 934 935/* 936 * Interrupt handler for "normal" ssch-io based dasd devices. 937 */ | 963} 964 965/* 966 * Interrupt handler for "normal" ssch-io based dasd devices. 967 */ |
938void 939dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 940 struct irb *irb) | 968void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 969 struct irb *irb) |
941{ 942 struct dasd_ccw_req *cqr, *next; 943 struct dasd_device *device; 944 unsigned long long now; 945 int expires; | 970{ 971 struct dasd_ccw_req *cqr, *next; 972 struct dasd_device *device; 973 unsigned long long now; 974 int expires; |
946 dasd_era_t era; 947 char mask; | |
948 949 if (IS_ERR(irb)) { 950 switch (PTR_ERR(irb)) { 951 case -EIO: 952 dasd_handle_killed_request(cdev, intparm); 953 break; 954 case -ETIMEDOUT: 955 printk(KERN_WARNING"%s(%s): request timed out\n", --- 8 unchanged lines hidden (view full) --- 964 } 965 966 now = get_clock(); 967 968 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", 969 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 970 (unsigned int) intparm); 971 | 975 976 if (IS_ERR(irb)) { 977 switch (PTR_ERR(irb)) { 978 case -EIO: 979 dasd_handle_killed_request(cdev, intparm); 980 break; 981 case -ETIMEDOUT: 982 printk(KERN_WARNING"%s(%s): request timed out\n", --- 8 unchanged lines hidden (view full) --- 991 } 992 993 now = get_clock(); 994 995 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", 996 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 997 (unsigned int) intparm); 998 |
972 /* first of all check for state change pending interrupt */ 973 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 974 if ((irb->scsw.dstat & mask) == mask) { | 999 /* check for unsolicited interrupts */ 1000 cqr = (struct dasd_ccw_req *) intparm; 1001 if (!cqr || ((irb->scsw.cc == 1) && 1002 (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && 1003 (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { 1004 if (cqr && cqr->status == DASD_CQR_IN_IO) 1005 cqr->status = DASD_CQR_QUEUED; |
975 device = dasd_device_from_cdev_locked(cdev); 976 if (!IS_ERR(device)) { | 1006 device = dasd_device_from_cdev_locked(cdev); 1007 if (!IS_ERR(device)) { |
977 dasd_handle_state_change_pending(device); | 1008 dasd_device_clear_timer(device); 1009 device->discipline->handle_unsolicited_interrupt(device, 1010 irb); |
978 dasd_put_device(device); 979 } 980 return; 981 } 982 | 1011 dasd_put_device(device); 1012 } 1013 return; 1014 } 1015 |
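        /*
         * "Unsolicited" now means: no cqr is attached to this interrupt, or
         * the start function ended with a deferred condition code 1 and
         * status pending, i.e. the I/O never actually started. A request
         * caught in DASD_CQR_IN_IO is put back to DASD_CQR_QUEUED and the
         * discipline's handle_unsolicited_interrupt() callback decides how
         * to proceed.
         */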
983 cqr = (struct dasd_ccw_req *) intparm; 984 985 /* check for unsolicited interrupts */ 986 if (cqr == NULL) { 987 MESSAGE(KERN_DEBUG, 988 "unsolicited interrupt received: bus_id %s", 989 cdev->dev.bus_id); 990 return; 991 } 992 993 device = (struct dasd_device *) cqr->device; 994 if (device == NULL || | 1016 device = (struct dasd_device *) cqr->startdev; 1017 if (!device || |
995 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 996 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 997 cdev->dev.bus_id); 998 return; 999 } 1000 1001 /* Check for clear pending */ | 1018 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1019 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 1020 cdev->dev.bus_id); 1021 return; 1022 } 1023 1024 /* Check for clear pending */ |
1002 if (cqr->status == DASD_CQR_CLEAR && | 1025 if (cqr->status == DASD_CQR_CLEAR_PENDING && |
1003 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1026 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { |
1004 cqr->status = DASD_CQR_QUEUED; 1005 dasd_clear_timer(device); | 1027 cqr->status = DASD_CQR_CLEARED; 1028 dasd_device_clear_timer(device); |
1006 wake_up(&dasd_flush_wq); | 1029 wake_up(&dasd_flush_wq); |
1007 dasd_schedule_bh(device); | 1030 dasd_schedule_device_bh(device); |
1008 return; 1009 } 1010 1011 /* check status - the request might have been killed by dyn detach */ 1012 if (cqr->status != DASD_CQR_IN_IO) { 1013 MESSAGE(KERN_DEBUG, 1014 "invalid status: bus_id %s, status %02x", 1015 cdev->dev.bus_id, cqr->status); 1016 return; 1017 } 1018 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1019 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); | 1031 return; 1032 } 1033 1034 /* check status - the request might have been killed by dyn detach */ 1035 if (cqr->status != DASD_CQR_IN_IO) { 1036 MESSAGE(KERN_DEBUG, 1037 "invalid status: bus_id %s, status %02x", 1038 cdev->dev.bus_id, cqr->status); 1039 return; 1040 } 1041 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1042 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); |
1020 1021 /* Find out the appropriate era_action. */ 1022 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) 1023 era = dasd_era_fatal; 1024 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1025 irb->scsw.cstat == 0 && 1026 !irb->esw.esw0.erw.cons) 1027 era = dasd_era_none; 1028 else if (irb->esw.esw0.erw.cons) 1029 era = device->discipline->examine_error(cqr, irb); 1030 else 1031 era = dasd_era_recover; 1032 1033 DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era); | 1043 next = NULL; |
1034 expires = 0; | 1044 expires = 0; |
1035 if (era == dasd_era_none) { 1036 cqr->status = DASD_CQR_DONE; | 1045 if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1046 irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { 1047 /* request was completed successfully */ 1048 cqr->status = DASD_CQR_SUCCESS; |
1037 cqr->stopclk = now; 1038 /* Start first request on queue if possible -> fast_io. */ | 1049 cqr->stopclk = now; 1050 /* Start first request on queue if possible -> fast_io. */ |
1039 if (cqr->list.next != &device->ccw_queue) { 1040 next = list_entry(cqr->list.next, 1041 struct dasd_ccw_req, list); 1042 if ((next->status == DASD_CQR_QUEUED) && 1043 (!device->stopped)) { 1044 if (device->discipline->start_IO(next) == 0) 1045 expires = next->expires; 1046 else 1047 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1048 "Interrupt fastpath " 1049 "failed!"); 1050 } | 1051 if (cqr->devlist.next != &device->ccw_queue) { 1052 next = list_entry(cqr->devlist.next, 1053 struct dasd_ccw_req, devlist); |
1051 } | 1054 } |
1052 } else { /* error */ 1053 memcpy(&cqr->irb, irb, sizeof (struct irb)); | 1055 } else { /* error */ 1056 memcpy(&cqr->irb, irb, sizeof(struct irb)); |
1054 if (device->features & DASD_FEATURE_ERPLOG) { | 1057 if (device->features & DASD_FEATURE_ERPLOG) { |
1055 /* dump sense data */ | |
1056 dasd_log_sense(cqr, irb); 1057 } | 1058 dasd_log_sense(cqr, irb); 1059 } |
1058 switch (era) { 1059 case dasd_era_fatal: 1060 cqr->status = DASD_CQR_FAILED; 1061 cqr->stopclk = now; 1062 break; 1063 case dasd_era_recover: | 1060 /* If we have no sense data, or we just don't want complex ERP 1061 * for this request, but if we have retries left, then just 1062 * reset this request and retry it in the fastpath 1063 */ 1064 if (!(cqr->irb.esw.esw0.erw.cons && 1065 test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) && 1066 cqr->retries > 0) { 1067 DEV_MESSAGE(KERN_DEBUG, device, 1068 "default ERP in fastpath (%i retries left)", 1069 cqr->retries); 1070 cqr->lpm = LPM_ANYPATH; 1071 cqr->status = DASD_CQR_QUEUED; 1072 next = cqr; 1073 } else |
1064 cqr->status = DASD_CQR_ERROR; | 1074 cqr->status = DASD_CQR_ERROR; |
1065 break; 1066 default: 1067 BUG(); 1068 } | |
1069 } | 1075 } |
1076 if (next && (next->status == DASD_CQR_QUEUED) && 1077 (!device->stopped)) { 1078 if (device->discipline->start_IO(next) == 0) 1079 expires = next->expires; 1080 else 1081 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1082 "Interrupt fastpath " 1083 "failed!"); 1084 } |
|
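        /*
         * Fastpath ERP: a failed request that has no sense data or did not
         * ask for full ERP (DASD_CQR_FLAGS_USE_ERP) is retried straight from
         * interrupt context while retries remain - status back to QUEUED,
         * lpm widened to LPM_ANYPATH, and the request becomes 'next' for the
         * restart below. Everything else is marked DASD_CQR_ERROR and left
         * to the layered error recovery.
         */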
1070 if (expires != 0) | 1085 if (expires != 0) |
1071 dasd_set_timer(device, expires); | 1086 dasd_device_set_timer(device, expires); |
1072 else | 1087 else |
1073 dasd_clear_timer(device); 1074 dasd_schedule_bh(device); | 1088 dasd_device_clear_timer(device); 1089 dasd_schedule_device_bh(device); |
1075} 1076 1077/* | 1090} 1091 1092/* |
1078 * posts the buffer_cache about a finalized request | 1093 * If we have an error on a dasd_block layer request then we cancel 1094 * and return all further requests from the same dasd_block as well. |
1079 */ | 1095 */ |
1080static inline void 1081dasd_end_request(struct request *req, int uptodate) | 1096static void __dasd_device_recovery(struct dasd_device *device, 1097 struct dasd_ccw_req *ref_cqr) |
1082{ | 1098{ |
1083 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1084 BUG(); 1085 add_disk_randomness(req->rq_disk); 1086 end_that_request_last(req, uptodate); 1087} | 1099 struct list_head *l, *n; 1100 struct dasd_ccw_req *cqr; |
1088 | 1101 |
1089/* 1090 * Process finished error recovery ccw. 1091 */ 1092static inline void 1093__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) 1094{ 1095 dasd_erp_fn_t erp_fn; | 1102 /* 1103 * only requeue request that came from the dasd_block layer 1104 */ 1105 if (!ref_cqr->block) 1106 return; |
1096 | 1107 |
1097 if (cqr->status == DASD_CQR_DONE) 1098 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1099 else 1100 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1101 erp_fn = device->discipline->erp_postaction(cqr); 1102 erp_fn(cqr); 1103} | 1108 list_for_each_safe(l, n, &device->ccw_queue) { 1109 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1110 if (cqr->status == DASD_CQR_QUEUED && 1111 ref_cqr->block == cqr->block) { 1112 cqr->status = DASD_CQR_CLEARED; 1113 } 1114 } 1115}; |
1104 1105/* | 1116 1117/* |
1106 * Process ccw request queue. | 1118 * Remove those ccw requests from the queue that need to be returned 1119 * to the upper layer. |
1107 */ | 1120 */ |
1108static void 1109__dasd_process_ccw_queue(struct dasd_device * device, 1110 struct list_head *final_queue) | 1121static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1122 struct list_head *final_queue) |
1111{ 1112 struct list_head *l, *n; 1113 struct dasd_ccw_req *cqr; | 1123{ 1124 struct list_head *l, *n; 1125 struct dasd_ccw_req *cqr; |
1114 dasd_erp_fn_t erp_fn; | |
1115 | 1126 |
1116restart: | |
1117 /* Process request with final status. */ 1118 list_for_each_safe(l, n, &device->ccw_queue) { | 1127 /* Process request with final status. */ 1128 list_for_each_safe(l, n, &device->ccw_queue) { |
1119 cqr = list_entry(l, struct dasd_ccw_req, list); | 1129 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1130 |
1120 /* Stop list processing at the first non-final request. */ | 1131 /* Stop list processing at the first non-final request. */ |
1121 if (cqr->status != DASD_CQR_DONE && 1122 cqr->status != DASD_CQR_FAILED && 1123 cqr->status != DASD_CQR_ERROR) | 1132 if (cqr->status == DASD_CQR_QUEUED || 1133 cqr->status == DASD_CQR_IN_IO || 1134 cqr->status == DASD_CQR_CLEAR_PENDING) |
1124 break; | 1135 break; |
1125 /* Process requests with DASD_CQR_ERROR */ | |
1126 if (cqr->status == DASD_CQR_ERROR) { | 1136 if (cqr->status == DASD_CQR_ERROR) { |
1127 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1128 cqr->status = DASD_CQR_FAILED; 1129 cqr->stopclk = get_clock(); 1130 } else { 1131 if (cqr->irb.esw.esw0.erw.cons && 1132 test_bit(DASD_CQR_FLAGS_USE_ERP, 1133 &cqr->flags)) { 1134 erp_fn = device->discipline-> 1135 erp_action(cqr); 1136 erp_fn(cqr); 1137 } else 1138 dasd_default_erp_action(cqr); 1139 } 1140 goto restart; | 1137 __dasd_device_recovery(device, cqr); |
1141 } | 1138 } |
1142 1143 /* First of all call extended error reporting. */ 1144 if (dasd_eer_enabled(device) && 1145 cqr->status == DASD_CQR_FAILED) { 1146 dasd_eer_write(device, cqr, DASD_EER_FATALERROR); 1147 1148 /* restart request */ 1149 cqr->status = DASD_CQR_QUEUED; 1150 cqr->retries = 255; 1151 device->stopped |= DASD_STOPPED_QUIESCE; 1152 goto restart; 1153 } 1154 1155 /* Process finished ERP request. */ 1156 if (cqr->refers) { 1157 __dasd_process_erp(device, cqr); 1158 goto restart; 1159 } 1160 | |
1161 /* Rechain finished requests to final queue */ | 1139 /* Rechain finished requests to final queue */ |
1162 cqr->endclk = get_clock(); 1163 list_move_tail(&cqr->list, final_queue); | 1140 list_move_tail(&cqr->devlist, final_queue); |
1164 } 1165} 1166 | 1141 } 1142} 1143 |
1167static void 1168dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) 1169{ 1170 struct request *req; 1171 struct dasd_device *device; 1172 int status; 1173 1174 req = (struct request *) data; 1175 device = cqr->device; 1176 dasd_profile_end(device, cqr, req); 1177 status = cqr->device->discipline->free_cp(cqr,req); 1178 spin_lock_irq(&device->request_queue_lock); 1179 dasd_end_request(req, status); 1180 spin_unlock_irq(&device->request_queue_lock); 1181} 1182 1183 | |
1184/* | 1144/* |
1185 * Fetch requests from the block device queue. | 1145 * the cqrs from the final queue are returned to the upper layer 1146 * by setting a dasd_block state and calling the callback function |
1186 */ | 1147 */ |
1187static void 1188__dasd_process_blk_queue(struct dasd_device * device) | 1148static void __dasd_device_process_final_queue(struct dasd_device *device, 1149 struct list_head *final_queue) |
1189{ | 1150{ |
1190 struct request_queue *queue; 1191 struct request *req; | 1151 struct list_head *l, *n; |
1192 struct dasd_ccw_req *cqr; | 1152 struct dasd_ccw_req *cqr; |
1193 int nr_queued; | |
1194 | 1153 |
1195 queue = device->request_queue; 1196 /* No queue ? Then there is nothing to do. */ 1197 if (queue == NULL) 1198 return; 1199 1200 /* 1201 * We requeue request from the block device queue to the ccw 1202 * queue only in two states. In state DASD_STATE_READY the 1203 * partition detection is done and we need to requeue requests 1204 * for that. State DASD_STATE_ONLINE is normal block device 1205 * operation. 1206 */ 1207 if (device->state != DASD_STATE_READY && 1208 device->state != DASD_STATE_ONLINE) 1209 return; 1210 nr_queued = 0; 1211 /* Now we try to fetch requests from the request queue */ 1212 list_for_each_entry(cqr, &device->ccw_queue, list) 1213 if (cqr->status == DASD_CQR_QUEUED) 1214 nr_queued++; 1215 while (!blk_queue_plugged(queue) && 1216 elv_next_request(queue) && 1217 nr_queued < DASD_CHANQ_MAX_SIZE) { 1218 req = elv_next_request(queue); 1219 1220 if (device->features & DASD_FEATURE_READONLY && 1221 rq_data_dir(req) == WRITE) { 1222 DBF_DEV_EVENT(DBF_ERR, device, 1223 "Rejecting write request %p", 1224 req); 1225 blkdev_dequeue_request(req); 1226 dasd_end_request(req, 0); 1227 continue; | 1154 list_for_each_safe(l, n, final_queue) { 1155 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1156 list_del_init(&cqr->devlist); 1157 if (cqr->block) 1158 spin_lock_bh(&cqr->block->queue_lock); 1159 switch (cqr->status) { 1160 case DASD_CQR_SUCCESS: 1161 cqr->status = DASD_CQR_DONE; 1162 break; 1163 case DASD_CQR_ERROR: 1164 cqr->status = DASD_CQR_NEED_ERP; 1165 break; 1166 case DASD_CQR_CLEARED: 1167 cqr->status = DASD_CQR_TERMINATED; 1168 break; 1169 default: 1170 DEV_MESSAGE(KERN_ERR, device, 1171 "wrong cqr status in __dasd_process_final_queue " 1172 "for cqr %p, status %x", 1173 cqr, cqr->status); 1174 BUG(); |
1228 } | 1175 } |
1229 if (device->stopped & DASD_STOPPED_DC_EIO) { 1230 blkdev_dequeue_request(req); 1231 dasd_end_request(req, 0); 1232 continue; 1233 } 1234 cqr = device->discipline->build_cp(device, req); 1235 if (IS_ERR(cqr)) { 1236 if (PTR_ERR(cqr) == -ENOMEM) 1237 break; /* terminate request queue loop */ 1238 if (PTR_ERR(cqr) == -EAGAIN) { 1239 /* 1240 * The current request cannot be build right 1241 * now, we have to try later. If this request 1242 * is the head-of-queue we stop the device 1243 * for 1/2 second. 1244 */ 1245 if (!list_empty(&device->ccw_queue)) 1246 break; 1247 device->stopped |= DASD_STOPPED_PENDING; 1248 dasd_set_timer(device, HZ/2); 1249 break; 1250 } 1251 DBF_DEV_EVENT(DBF_ERR, device, 1252 "CCW creation failed (rc=%ld) " 1253 "on request %p", 1254 PTR_ERR(cqr), req); 1255 blkdev_dequeue_request(req); 1256 dasd_end_request(req, 0); 1257 continue; 1258 } 1259 cqr->callback = dasd_end_request_cb; 1260 cqr->callback_data = (void *) req; 1261 cqr->status = DASD_CQR_QUEUED; 1262 blkdev_dequeue_request(req); 1263 list_add_tail(&cqr->list, &device->ccw_queue); 1264 dasd_profile_start(device, cqr, req); 1265 nr_queued++; | 1176 if (cqr->block) 1177 spin_unlock_bh(&cqr->block->queue_lock); 1178 if (cqr->callback != NULL) 1179 (cqr->callback)(cqr, cqr->callback_data); |
1266 } 1267} 1268 | 1180 } 1181} 1182 |
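The switch above is the hand-over point between the two layers: a device-layer end status is translated into the status the dasd_block layer acts on, and only then is the callback invoked. A compilable sketch of just that mapping (hypothetical names; abort() stands in for BUG()):

#include <stdio.h>
#include <stdlib.h>

enum cqr_status { CQR_SUCCESS, CQR_ERROR, CQR_CLEARED,
                  CQR_DONE, CQR_NEED_ERP, CQR_TERMINATED };

/* Map a device-layer end status to the block-layer status;
 * any other input indicates a driver bug. */
static enum cqr_status finalize_status(enum cqr_status s)
{
        switch (s) {
        case CQR_SUCCESS: return CQR_DONE;
        case CQR_ERROR:   return CQR_NEED_ERP;
        case CQR_CLEARED: return CQR_TERMINATED;
        default:          abort();      /* mirrors BUG() */
        }
}

int main(void)
{
        printf("%d\n", finalize_status(CQR_ERROR) == CQR_NEED_ERP); /* 1 */
        return 0;
}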
1183 1184 |
|
1269/* 1270 * Take a look at the first request on the ccw queue and check 1271 * if it reached its expire time. If so, terminate the IO. 1272 */ | 1185/* 1186 * Take a look at the first request on the ccw queue and check 1187 * if it reached its expire time. If so, terminate the IO. 1188 */ |
1273static void 1274__dasd_check_expire(struct dasd_device * device) | 1189static void __dasd_device_check_expire(struct dasd_device *device) |
1275{ 1276 struct dasd_ccw_req *cqr; 1277 1278 if (list_empty(&device->ccw_queue)) 1279 return; | 1190{ 1191 struct dasd_ccw_req *cqr; 1192 1193 if (list_empty(&device->ccw_queue)) 1194 return; |
1280 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1195 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1281 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1282 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1283 if (device->discipline->term_IO(cqr) != 0) { 1284 /* Hmpf, try again in 5 sec */ | 1196 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1197 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1198 if (device->discipline->term_IO(cqr) != 0) { 1199 /* Hmpf, try again in 5 sec */ |
1285 dasd_set_timer(device, 5*HZ); | 1200 dasd_device_set_timer(device, 5*HZ); |
1286 DEV_MESSAGE(KERN_ERR, device, 1287 "internal error - timeout (%is) expired " 1288 "for cqr %p, termination failed, " 1289 "retrying in 5s", 1290 (cqr->expires/HZ), cqr); 1291 } else { 1292 DEV_MESSAGE(KERN_ERR, device, 1293 "internal error - timeout (%is) expired " 1294 "for cqr %p (%i retries left)", 1295 (cqr->expires/HZ), cqr, cqr->retries); 1296 } 1297 } 1298} 1299 1300/* 1301 * Take a look at the first request on the ccw queue and check 1302 * if it needs to be started. 1303 */ | 1201 DEV_MESSAGE(KERN_ERR, device, 1202 "internal error - timeout (%is) expired " 1203 "for cqr %p, termination failed, " 1204 "retrying in 5s", 1205 (cqr->expires/HZ), cqr); 1206 } else { 1207 DEV_MESSAGE(KERN_ERR, device, 1208 "internal error - timeout (%is) expired " 1209 "for cqr %p (%i retries left)", 1210 (cqr->expires/HZ), cqr, cqr->retries); 1211 } 1212 } 1213} 1214 1215/* 1216 * Take a look at the first request on the ccw queue and check 1217 * if it needs to be started. 1218 */ |
1304static void 1305__dasd_start_head(struct dasd_device * device) | 1219static void __dasd_device_start_head(struct dasd_device *device) |
1306{ 1307 struct dasd_ccw_req *cqr; 1308 int rc; 1309 1310 if (list_empty(&device->ccw_queue)) 1311 return; | 1220{ 1221 struct dasd_ccw_req *cqr; 1222 int rc; 1223 1224 if (list_empty(&device->ccw_queue)) 1225 return; |
1312 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1313 if (cqr->status != DASD_CQR_QUEUED) 1314 return; | 1227 if (cqr->status != DASD_CQR_QUEUED) 1228 return; |
1315 /* Non-temporary stop condition will trigger fail fast */ 1316 if (device->stopped & ~DASD_STOPPED_PENDING && 1317 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1318 (!dasd_eer_enabled(device))) { 1319 cqr->status = DASD_CQR_FAILED; 1320 dasd_schedule_bh(device); | 1229 /* when device is stopped, return request to previous layer */ 1230 if (device->stopped) { 1231 cqr->status = DASD_CQR_CLEARED; 1232 dasd_schedule_device_bh(device); |
1321 return; 1322 } | 1233 return; 1234 } |
1323 /* Don't try to start requests if device is stopped */ 1324 if (device->stopped) 1325 return; | |
1326 1327 rc = device->discipline->start_IO(cqr); 1328 if (rc == 0) | 1235 1236 rc = device->discipline->start_IO(cqr); 1237 if (rc == 0) |
1329 dasd_set_timer(device, cqr->expires); | 1238 dasd_device_set_timer(device, cqr->expires); |
1330 else if (rc == -EACCES) { | 1239 else if (rc == -EACCES) { |
1331 dasd_schedule_bh(device); | 1240 dasd_schedule_device_bh(device); |
1332 } else 1333 /* Hmpf, try again in 1/2 sec */ | 1241 } else 1242 /* Hmpf, try again in 1/2 sec */ |
1334 dasd_set_timer(device, 50); | 1243 dasd_device_set_timer(device, 50); |
1335} 1336 | 1244} 1245 |
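The expiry test in __dasd_device_check_expire() relies on time_after_eq(jiffies, cqr->expires + cqr->starttime), which stays correct across a jiffies wrap-around because the comparison is done on the signed value of an unsigned difference. A self-contained model of that idiom; the values are chosen to force a wrap and are not taken from the driver:

#include <stdio.h>

/* Wrap-safe model of the kernel's time_after_eq() macro. */
static int time_after_eq_model(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;
}

int main(void)
{
        unsigned long starttime = (unsigned long)-10; /* just before wrap */
        unsigned long expires = 30;
        unsigned long jiffies = 25;                   /* after the wrap */

        /* 35 ticks have elapsed, so the 30-tick budget is exceeded. */
        printf("expired: %d\n",
               time_after_eq_model(jiffies, starttime + expires));
        return 0;                                     /* prints 1 */
}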
1337static inline int 1338_wait_for_clear(struct dasd_ccw_req *cqr) 1339{ 1340 return (cqr->status == DASD_CQR_QUEUED); 1341} 1342 | |
1343/* | 1246/* |
1344 * Remove all requests from the ccw queue (all = '1') or only block device 1345 * requests in case all = '0'. 1346 * Take care of the erp-chain (chained via cqr->refers) and remove either 1347 * the whole erp-chain or none of the erp-requests. 1348 * If a request is currently running, term_IO is called and the request 1349 * is re-queued. Prior to removing the terminated request we need to wait 1350 * for the clear-interrupt. 1351 * In case termination is not possible we stop processing and just finish 1352 * the already moved requests. | 1247 * Go through all requests on the dasd_device request queue, 1248 * terminate them on the cdev if necessary, and return them to the 1249 * submitting layer via callback. 1250 * Note: 1251 * Make sure that all 'submitting layers' still exist when 1252 * this function is called! In other words, when 'device' is a base 1253 * device then all block layer requests must have been removed before 1254 * via dasd_flush_block_queue. |
1353 */ | 1255 */ |
1354static int 1355dasd_flush_ccw_queue(struct dasd_device * device, int all) | 1256int dasd_flush_device_queue(struct dasd_device *device) |
1356{ | 1257{ |
1357 struct dasd_ccw_req *cqr, *orig, *n; 1358 int rc, i; 1359 | 1258 struct dasd_ccw_req *cqr, *n; 1259 int rc; |
1360 struct list_head flush_queue; 1361 1362 INIT_LIST_HEAD(&flush_queue); 1363 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1364 rc = 0; | 1260 struct list_head flush_queue; 1261 1262 INIT_LIST_HEAD(&flush_queue); 1263 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1264 rc = 0; |
1365restart: 1366 list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) { 1367 /* get original request of erp request-chain */ 1368 for (orig = cqr; orig->refers != NULL; orig = orig->refers); 1369 1370 /* Flush all request or only block device requests? */ 1371 if (all == 0 && cqr->callback != dasd_end_request_cb && 1372 orig->callback != dasd_end_request_cb) { 1373 continue; 1374 } | 1265 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { |
1375 /* Check status and move request to flush_queue */ 1376 switch (cqr->status) { 1377 case DASD_CQR_IN_IO: 1378 rc = device->discipline->term_IO(cqr); 1379 if (rc) { 1380 /* unable to terminate request */ 1381 DEV_MESSAGE(KERN_ERR, device, 1382 "dasd flush ccw_queue is unable " 1383 " to terminate request %p", 1384 cqr); 1385 /* stop flush processing */ 1386 goto finished; 1387 } 1388 break; 1389 case DASD_CQR_QUEUED: | 1266 /* Check status and move request to flush_queue */ 1267 switch (cqr->status) { 1268 case DASD_CQR_IN_IO: 1269 rc = device->discipline->term_IO(cqr); 1270 if (rc) { 1271 /* unable to terminate request */ 1272 DEV_MESSAGE(KERN_ERR, device, 1273 "dasd flush ccw_queue is unable " 1274 " to terminate request %p", 1275 cqr); 1276 /* stop flush processing */ 1277 goto finished; 1278 } 1279 break; 1280 case DASD_CQR_QUEUED: |
1390 case DASD_CQR_ERROR: 1391 /* set request to FAILED */ | |
1392 cqr->stopclk = get_clock(); | 1281 cqr->stopclk = get_clock(); |
1393 cqr->status = DASD_CQR_FAILED; | 1282 cqr->status = DASD_CQR_CLEARED; |
1394 break; | 1283 break; |
1395 default: /* do not touch the others */ | 1284 default: /* no need to modify the others */ |
1396 break; 1397 } | 1285 break; 1286 } |
1398 /* Rechain request (including erp chain) */ 1399 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) { 1400 cqr->endclk = get_clock(); 1401 list_move_tail(&cqr->list, &flush_queue); 1402 } 1403 if (i > 1) 1404 /* moved more than one request - need to restart */ 1405 goto restart; | 1287 list_move_tail(&cqr->devlist, &flush_queue); |
1406 } | 1288 } |
1407 | |
1408finished: 1409 spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1289finished: 1290 spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1410 /* Now call the callback function of flushed requests */ 1411restart_cb: 1412 list_for_each_entry_safe(cqr, n, &flush_queue, list) { 1413 if (cqr->status == DASD_CQR_CLEAR) { 1414 /* wait for clear interrupt! */ 1415 wait_event(dasd_flush_wq, _wait_for_clear(cqr)); 1416 cqr->status = DASD_CQR_FAILED; 1417 } 1418 /* Process finished ERP request. */ 1419 if (cqr->refers) { 1420 __dasd_process_erp(device, cqr); 1421 /* restart list_for_xx loop since dasd_process_erp 1422 * might remove multiple elements */ 1423 goto restart_cb; 1424 } 1425 /* call the callback function */ 1426 cqr->endclk = get_clock(); 1427 if (cqr->callback != NULL) 1428 (cqr->callback)(cqr, cqr->callback_data); 1429 } | 1291 /* 1292 * After this point all requests must be in state CLEAR_PENDING, 1293 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1294 * one of the others. 1295 */ 1296 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1297 wait_event(dasd_flush_wq, 1298 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1299 /* 1300 * Now set each request back to TERMINATED, DONE or NEED_ERP 1301 * and call the callback function of flushed requests 1302 */ 1303 __dasd_device_process_final_queue(device, &flush_queue); |
1430 return rc; 1431} 1432 1433/* 1434 * Acquire the device lock and process queues for the device. 1435 */ | 1304 return rc; 1305} 1306 1307/* 1308 * Acquire the device lock and process queues for the device. 1309 */ |
1436static void 1437dasd_tasklet(struct dasd_device * device) | 1310static void dasd_device_tasklet(struct dasd_device *device) |
1438{ 1439 struct list_head final_queue; | 1311{ 1312 struct list_head final_queue; |
1440 struct list_head *l, *n; 1441 struct dasd_ccw_req *cqr; | |
1442 1443 atomic_set (&device->tasklet_scheduled, 0); 1444 INIT_LIST_HEAD(&final_queue); 1445 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1446 /* Check expire time of first request on the ccw queue. */ | 1313 1314 atomic_set (&device->tasklet_scheduled, 0); 1315 INIT_LIST_HEAD(&final_queue); 1316 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1317 /* Check expire time of first request on the ccw queue. */ |
1447 __dasd_check_expire(device); 1448 /* Finish off requests on ccw queue */ 1449 __dasd_process_ccw_queue(device, &final_queue); | 1318 __dasd_device_check_expire(device); 1319 /* find final requests on ccw queue */ 1320 __dasd_device_process_ccw_queue(device, &final_queue); |
1450 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1451 /* Now call the callback function of requests with final status */ | 1321 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1322 /* Now call the callback function of requests with final status */ |
1452 list_for_each_safe(l, n, &final_queue) { 1453 cqr = list_entry(l, struct dasd_ccw_req, list); 1454 list_del_init(&cqr->list); 1455 if (cqr->callback != NULL) 1456 (cqr->callback)(cqr, cqr->callback_data); 1457 } 1458 spin_lock_irq(&device->request_queue_lock); 1459 spin_lock(get_ccwdev_lock(device->cdev)); 1460 /* Get new request from the block device request queue */ 1461 __dasd_process_blk_queue(device); | 1323 __dasd_device_process_final_queue(device, &final_queue); 1324 spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1462 /* Now check if the head of the ccw queue needs to be started. */ | 1325 /* Now check if the head of the ccw queue needs to be started. */ |
1463 __dasd_start_head(device); 1464 spin_unlock(get_ccwdev_lock(device->cdev)); 1465 spin_unlock_irq(&device->request_queue_lock); | 1326 __dasd_device_start_head(device); 1327 spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1466 dasd_put_device(device); 1467} 1468 1469/* 1470 * Schedules a call to dasd_tasklet over the device tasklet. 1471 */ | 1328 dasd_put_device(device); 1329} 1330 1331/* 1332 * Schedules a call to dasd_device_tasklet over the device tasklet. 1333 */ |
1472void 1473dasd_schedule_bh(struct dasd_device * device) | 1334void dasd_schedule_device_bh(struct dasd_device *device) |
1474{ 1475 /* Protect against rescheduling. */ 1476 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1477 return; 1478 dasd_get_device(device); 1479 tasklet_hi_schedule(&device->tasklet); 1480} 1481 1482/* | 1335{ 1336 /* Protect against rescheduling. */ 1337 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1338 return; 1339 dasd_get_device(device); 1340 tasklet_hi_schedule(&device->tasklet); 1341} 1342 1343/* |
1483 * Queue a request to the head of the ccw_queue. Start the I/O if 1484 * possible. | 1344 * Queue a request to the head of the device ccw_queue. 1345 * Start the I/O if possible. |
1485 */ | 1346 */ |
1486void 1487dasd_add_request_head(struct dasd_ccw_req *req) | 1347void dasd_add_request_head(struct dasd_ccw_req *cqr) |
1488{ 1489 struct dasd_device *device; 1490 unsigned long flags; 1491 | 1348{ 1349 struct dasd_device *device; 1350 unsigned long flags; 1351 |
1492 device = req->device; | 1352 device = cqr->startdev; |
1493 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 1353 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
1494 req->status = DASD_CQR_QUEUED; 1495 req->device = device; 1496 list_add(&req->list, &device->ccw_queue); | 1354 cqr->status = DASD_CQR_QUEUED; 1355 list_add(&cqr->devlist, &device->ccw_queue); |
1497 /* let the bh start the request to keep them in order */ | 1356 /* let the bh start the request to keep them in order */ |
1498 dasd_schedule_bh(device); | 1357 dasd_schedule_device_bh(device); |
1499 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1500} 1501 1502/* | 1358 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1359} 1360 1361/* |
1503 * Queue a request to the tail of the ccw_queue. Start the I/O if 1504 * possible. | 1362 * Queue a request to the tail of the device ccw_queue. 1363 * Start the I/O if possible. |
1505 */ | 1364 */ |
1506void 1507dasd_add_request_tail(struct dasd_ccw_req *req) | 1365void dasd_add_request_tail(struct dasd_ccw_req *cqr) |
1508{ 1509 struct dasd_device *device; 1510 unsigned long flags; 1511 | 1366{ 1367 struct dasd_device *device; 1368 unsigned long flags; 1369 |
1512 device = req->device; | 1370 device = cqr->startdev; |
1513 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 1371 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
1514 req->status = DASD_CQR_QUEUED; 1515 req->device = device; 1516 list_add_tail(&req->list, &device->ccw_queue); | 1372 cqr->status = DASD_CQR_QUEUED; 1373 list_add_tail(&cqr->devlist, &device->ccw_queue); |
1517 /* let the bh start the request to keep them in order */ | 1374 /* let the bh start the request to keep them in order */ |
1518 dasd_schedule_bh(device); | 1375 dasd_schedule_device_bh(device); |
1519 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1520} 1521 1522/* | 1376 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1377} 1378 1379/* |
1523 * Wakeup callback. | 1380 * Wakeup helper for the 'sleep_on' functions. |
1524 */ | 1381 */ |
1525static void 1526dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) | 1382static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) |
1527{ 1528 wake_up((wait_queue_head_t *) data); 1529} 1530 | 1383{ 1384 wake_up((wait_queue_head_t *) data); 1385} 1386 |
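dasd_wakeup_cb() is the whole trick behind the sleep_on family that follows: the submitter parks on a wait queue and the completion path wakes it through the cqr callback, turning the asynchronous machinery into a synchronous call. Below is a pthread-based model of the same pattern, with all names hypothetical and a condition variable standing in for wait_event()/wake_up():

#include <pthread.h>
#include <stdio.h>

struct cqr_model {
        int done;
        pthread_mutex_t lock;
        pthread_cond_t wait_q;
};

/* Completion side: the callback flips the flag and wakes the waiter. */
static void wakeup_cb(struct cqr_model *cqr)
{
        pthread_mutex_lock(&cqr->lock);
        cqr->done = 1;
        pthread_cond_signal(&cqr->wait_q);
        pthread_mutex_unlock(&cqr->lock);
}

static void *completion_thread(void *arg)
{
        wakeup_cb(arg);
        return NULL;
}

int main(void)
{
        struct cqr_model cqr = { 0, PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, completion_thread, &cqr);
        pthread_mutex_lock(&cqr.lock);          /* the "sleep_on" side */
        while (!cqr.done)
                pthread_cond_wait(&cqr.wait_q, &cqr.lock);
        pthread_mutex_unlock(&cqr.lock);
        pthread_join(t, NULL);
        puts("request completed");
        return 0;
}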
1531static inline int 1532_wait_for_wakeup(struct dasd_ccw_req *cqr) | 1387static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) |
1533{ 1534 struct dasd_device *device; 1535 int rc; 1536 | 1388{ 1389 struct dasd_device *device; 1390 int rc; 1391 |
1537 device = cqr->device; | 1392 device = cqr->startdev; |
1538 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1539 rc = ((cqr->status == DASD_CQR_DONE || | 1393 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1394 rc = ((cqr->status == DASD_CQR_DONE || |
1540 cqr->status == DASD_CQR_FAILED) && 1541 list_empty(&cqr->list)); | 1395 cqr->status == DASD_CQR_NEED_ERP || 1396 cqr->status == DASD_CQR_TERMINATED) && 1397 list_empty(&cqr->devlist)); |
1542 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1543 return rc; 1544} 1545 1546/* | 1398 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1399 return rc; 1400} 1401 1402/* |
1547 * Attempts to start a special ccw queue and waits for its completion. | 1403 * Queue a request to the tail of the device ccw_queue and wait for 1404 * its completion. |
1548 */ | 1405 */ |
1549int 1550dasd_sleep_on(struct dasd_ccw_req * cqr) | 1406int dasd_sleep_on(struct dasd_ccw_req *cqr) |
1551{ 1552 wait_queue_head_t wait_q; 1553 struct dasd_device *device; 1554 int rc; 1555 | 1407{ 1408 wait_queue_head_t wait_q; 1409 struct dasd_device *device; 1410 int rc; 1411 |
1556 device = cqr->device; 1557 spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1412 device = cqr->startdev; |
1558 1559 init_waitqueue_head (&wait_q); 1560 cqr->callback = dasd_wakeup_cb; 1561 cqr->callback_data = (void *) &wait_q; | 1413 1414 init_waitqueue_head (&wait_q); 1415 cqr->callback = dasd_wakeup_cb; 1416 cqr->callback_data = (void *) &wait_q; |
1562 cqr->status = DASD_CQR_QUEUED; 1563 list_add_tail(&cqr->list, &device->ccw_queue); 1564 1565 /* let the bh start the request to keep them in order */ 1566 dasd_schedule_bh(device); 1567 1568 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1569 | 1417 dasd_add_request_tail(cqr); |
1570 wait_event(wait_q, _wait_for_wakeup(cqr)); 1571 1572 /* Request status is either done or failed. */ | 1418 wait_event(wait_q, _wait_for_wakeup(cqr)); 1419 1420 /* Request status is either done or failed. */ |
1573 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | 1421 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
1574 return rc; 1575} 1576 1577/* | 1422 return rc; 1423} 1424 1425/* |
1578 * Attempts to start a special ccw queue and wait interruptibly 1579 * for its completion. | 1426 * Queue a request to the tail of the device ccw_queue and wait 1427 * interruptibly for its completion. |
1580 */ | 1428 */ |
1581int 1582dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) | 1429int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) |
1583{ 1584 wait_queue_head_t wait_q; 1585 struct dasd_device *device; | 1430{ 1431 wait_queue_head_t wait_q; 1432 struct dasd_device *device; |
1586 int rc, finished; | 1433 int rc; |
1587 | 1434 |
1588 device = cqr->device; 1589 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1590 | 1435 device = cqr->startdev; |
1591 init_waitqueue_head (&wait_q); 1592 cqr->callback = dasd_wakeup_cb; 1593 cqr->callback_data = (void *) &wait_q; | 1436 init_waitqueue_head (&wait_q); 1437 cqr->callback = dasd_wakeup_cb; 1438 cqr->callback_data = (void *) &wait_q; |
1594 cqr->status = DASD_CQR_QUEUED; 1595 list_add_tail(&cqr->list, &device->ccw_queue); 1596 1597 /* let the bh start the request to keep them in order */ 1598 dasd_schedule_bh(device); 1599 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1600 1601 finished = 0; 1602 while (!finished) { 1603 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); 1604 if (rc != -ERESTARTSYS) { 1605 /* Request is final (done or failed) */ 1606 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1607 break; 1608 } 1609 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1610 switch (cqr->status) { 1611 case DASD_CQR_IN_IO: 1612 /* terminate running cqr */ 1613 if (device->discipline->term_IO) { 1614 cqr->retries = -1; 1615 device->discipline->term_IO(cqr); 1616 /* wait (non-interruptible) for final status 1617 * because signal is still pending */ 1618 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1619 wait_event(wait_q, _wait_for_wakeup(cqr)); 1620 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1621 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1622 finished = 1; 1623 } 1624 break; 1625 case DASD_CQR_QUEUED: 1626 /* request */ 1627 list_del_init(&cqr->list); 1628 rc = -EIO; 1629 finished = 1; 1630 break; 1631 default: 1632 /* cqr with 'non-interruptible' status - just wait */ 1633 break; 1634 } 1635 spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1439 dasd_add_request_tail(cqr); 1440 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); 1441 if (rc == -ERESTARTSYS) { 1442 dasd_cancel_req(cqr); 1443 /* wait (non-interruptible) for final status */ 1444 wait_event(wait_q, _wait_for_wakeup(cqr)); |
1636 } | 1445 } |
1446 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
|
1637 return rc; 1638} 1639 1640/* 1641 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 1642 * for eckd devices) the currently running request has to be terminated 1643 * and be put back to status queued, before the special request is added 1644 * to the head of the queue. Then the special request is waited on normally. 1645 */ | 1447 return rc; 1448} 1449 1450/* 1451 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 1452 * for eckd devices) the currently running request has to be terminated 1453 * and be put back to status queued, before the special request is added 1454 * to the head of the queue. Then the special request is waited on normally. 1455 */ |
1646static inline int 1647_dasd_term_running_cqr(struct dasd_device *device) | 1456static inline int _dasd_term_running_cqr(struct dasd_device *device) |
1648{ 1649 struct dasd_ccw_req *cqr; 1650 1651 if (list_empty(&device->ccw_queue)) 1652 return 0; | 1457{ 1458 struct dasd_ccw_req *cqr; 1459 1460 if (list_empty(&device->ccw_queue)) 1461 return 0; |
1653 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1462 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1654 return device->discipline->term_IO(cqr); 1655} 1656 | 1463 return device->discipline->term_IO(cqr); 1464} 1465 |
1657int 1658dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) | 1466int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) |
1659{ 1660 wait_queue_head_t wait_q; 1661 struct dasd_device *device; 1662 int rc; 1663 | 1467{ 1468 wait_queue_head_t wait_q; 1469 struct dasd_device *device; 1470 int rc; 1471 |
1664 device = cqr->device; | 1472 device = cqr->startdev; |
1665 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1666 rc = _dasd_term_running_cqr(device); 1667 if (rc) { 1668 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1669 return rc; 1670 } 1671 1672 init_waitqueue_head (&wait_q); 1673 cqr->callback = dasd_wakeup_cb; 1674 cqr->callback_data = (void *) &wait_q; 1675 cqr->status = DASD_CQR_QUEUED; | 1473 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1474 rc = _dasd_term_running_cqr(device); 1475 if (rc) { 1476 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1477 return rc; 1478 } 1479 1480 init_waitqueue_head (&wait_q); 1481 cqr->callback = dasd_wakeup_cb; 1482 cqr->callback_data = (void *) &wait_q; 1483 cqr->status = DASD_CQR_QUEUED; |
1676 list_add(&cqr->list, &device->ccw_queue); | 1484 list_add(&cqr->devlist, &device->ccw_queue); |
1677 1678 /* let the bh start the request to keep them in order */ | 1485 1486 /* let the bh start the request to keep them in order */ |
1679 dasd_schedule_bh(device); | 1487 dasd_schedule_device_bh(device); |
1680 1681 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1682 1683 wait_event(wait_q, _wait_for_wakeup(cqr)); 1684 1685 /* Request status is either done or failed. */ | 1488 1489 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1490 1491 wait_event(wait_q, _wait_for_wakeup(cqr)); 1492 1493 /* Request status is either done or failed. */ |
1686 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | 1494 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
1687 return rc; 1688} 1689 1690/* 1691 * Cancels a request that was started with dasd_sleep_on_req. 1692 * This is useful to timeout requests. The request will be 1693 * terminated if it is currently in i/o. 1694 * Returns 1 if the request has been terminated. | 1495 return rc; 1496} 1497 1498/* 1499 * Cancels a request that was started with dasd_sleep_on_req. 1500 * This is useful to timeout requests. The request will be 1501 * terminated if it is currently in i/o. 1502 * Returns 1 if the request has been terminated. |
1503 * 0 if there was no need to terminate the request (not started yet) 1504 * negative error code if termination failed 1505 * Cancellation of a request is an asynchronous operation! The calling 1506 * function has to wait until the request is properly returned via callback. |
|
1695 */ | 1507 */ |
1696int 1697dasd_cancel_req(struct dasd_ccw_req *cqr) | 1508int dasd_cancel_req(struct dasd_ccw_req *cqr) |
1698{ | 1509{ |
1699 struct dasd_device *device = cqr->device; | 1510 struct dasd_device *device = cqr->startdev; |
1700 unsigned long flags; 1701 int rc; 1702 1703 rc = 0; 1704 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1705 switch (cqr->status) { 1706 case DASD_CQR_QUEUED: | 1511 unsigned long flags; 1512 int rc; 1513 1514 rc = 0; 1515 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1516 switch (cqr->status) { 1517 case DASD_CQR_QUEUED: |
1707 /* request was not started - just set to failed */ 1708 cqr->status = DASD_CQR_FAILED; | 1518 /* request was not started - just set to cleared */ 1519 cqr->status = DASD_CQR_CLEARED; |
1709 break; 1710 case DASD_CQR_IN_IO: 1711 /* request in IO - terminate IO and release again */ | 1520 break; 1521 case DASD_CQR_IN_IO: 1522 /* request in IO - terminate IO and release again */ |
1712 if (device->discipline->term_IO(cqr) != 0) 1713 /* what to do if unable to terminate ?????? 1714 e.g. not _IN_IO */ 1715 cqr->status = DASD_CQR_FAILED; 1716 cqr->stopclk = get_clock(); 1717 rc = 1; | 1523 rc = device->discipline->term_IO(cqr); 1524 if (rc) { 1525 DEV_MESSAGE(KERN_ERR, device, 1526 "dasd_cancel_req is unable " 1527 " to terminate request %p, rc = %d", 1528 cqr, rc); 1529 } else { 1530 cqr->stopclk = get_clock(); 1531 rc = 1; 1532 } |
1718 break; | 1533 break; |
1719 case DASD_CQR_DONE: 1720 case DASD_CQR_FAILED: 1721 /* already finished - do nothing */ | 1534 default: /* already finished or clear pending - do nothing */ |
1722 break; | 1535 break; |
1723 default: 1724 DEV_MESSAGE(KERN_ALERT, device, 1725 "invalid status %02x in request", 1726 cqr->status); | 1536 } 1537 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1538 dasd_schedule_device_bh(device); 1539 return rc; 1540} 1541 1542 1543/* 1544 * SECTION: Operations of the dasd_block layer. 1545 */ 1546 1547/* 1548 * Timeout function for dasd_block. This is used when the block layer 1549 * is waiting for something that may not come reliably, (e.g. a state 1550 * change interrupt) 1551 */ 1552static void dasd_block_timeout(unsigned long ptr) 1553{ 1554 unsigned long flags; 1555 struct dasd_block *block; 1556 1557 block = (struct dasd_block *) ptr; 1558 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 1559 /* re-activate request queue */ 1560 block->base->stopped &= ~DASD_STOPPED_PENDING; 1561 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 1562 dasd_schedule_block_bh(block); 1563} 1564 1565/* 1566 * Setup timeout for a dasd_block in jiffies. 1567 */ 1568void dasd_block_set_timer(struct dasd_block *block, int expires) 1569{ 1570 if (expires == 0) { 1571 if (timer_pending(&block->timer)) 1572 del_timer(&block->timer); 1573 return; 1574 } 1575 if (timer_pending(&block->timer)) { 1576 if (mod_timer(&block->timer, jiffies + expires)) 1577 return; 1578 } 1579 block->timer.function = dasd_block_timeout; 1580 block->timer.data = (unsigned long) block; 1581 block->timer.expires = jiffies + expires; 1582 add_timer(&block->timer); 1583} 1584 1585/* 1586 * Clear timeout for a dasd_block. 1587 */ 1588void dasd_block_clear_timer(struct dasd_block *block) 1589{ 1590 if (timer_pending(&block->timer)) 1591 del_timer(&block->timer); 1592} 1593 1594/* 1595 * posts the buffer_cache about a finalized request 1596 */ 1597static inline void dasd_end_request(struct request *req, int uptodate) 1598{ 1599 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) |
1727 BUG(); | 1600 BUG(); |
1601 add_disk_randomness(req->rq_disk); 1602 end_that_request_last(req, uptodate); 1603} |
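dasd_block_set_timer(), shown a little further up, encodes three cases: an expires value of 0 cancels a pending timer, a pending timer is re-armed in place via mod_timer(), and otherwise a fresh timer is initialized and added. A userspace model of that contract, with a plain struct instead of a kernel timer:

#include <stdio.h>

struct timer_model {
        int pending;
        unsigned long expires;
};

/* Model of dasd_block_set_timer(): 0 cancels, otherwise (re-)arm. */
static void set_timer(struct timer_model *t, unsigned long now,
                      unsigned long expires)
{
        if (expires == 0) {
                t->pending = 0;         /* del_timer() */
                return;
        }
        t->expires = now + expires;     /* mod_timer()/add_timer() */
        t->pending = 1;
}

int main(void)
{
        struct timer_model t = { 0, 0 };

        set_timer(&t, 1000, 50);        /* arm: fires at 1050 */
        set_timer(&t, 1010, 50);        /* re-arm: fires at 1060 */
        printf("pending=%d expires=%lu\n", t.pending, t.expires);
        set_timer(&t, 1020, 0);         /* cancel */
        printf("pending=%d\n", t.pending);
        return 0;
}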
|
1728 | 1604 |
1605/* 1606 * Process finished error recovery ccw. 1607 */ 1608static inline void __dasd_block_process_erp(struct dasd_block *block, 1609 struct dasd_ccw_req *cqr) 1610{ 1611 dasd_erp_fn_t erp_fn; 1612 struct dasd_device *device = block->base; 1613 1614 if (cqr->status == DASD_CQR_DONE) 1615 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1616 else 1617 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1618 erp_fn = device->discipline->erp_postaction(cqr); 1619 erp_fn(cqr); 1620} 1621 1622/* 1623 * Fetch requests from the block device queue. 1624 */ 1625static void __dasd_process_request_queue(struct dasd_block *block) 1626{ 1627 struct request_queue *queue; 1628 struct request *req; 1629 struct dasd_ccw_req *cqr; 1630 struct dasd_device *basedev; 1631 unsigned long flags; 1632 queue = block->request_queue; 1633 basedev = block->base; 1634 /* No queue ? Then there is nothing to do. */ 1635 if (queue == NULL) 1636 return; 1637 1638 /* 1639 * We requeue request from the block device queue to the ccw 1640 * queue only in two states. In state DASD_STATE_READY the 1641 * partition detection is done and we need to requeue requests 1642 * for that. State DASD_STATE_ONLINE is normal block device 1643 * operation. 1644 */ 1645 if (basedev->state < DASD_STATE_READY) 1646 return; 1647 /* Now we try to fetch requests from the request queue */ 1648 while (!blk_queue_plugged(queue) && 1649 elv_next_request(queue)) { 1650 1651 req = elv_next_request(queue); 1652 1653 if (basedev->features & DASD_FEATURE_READONLY && 1654 rq_data_dir(req) == WRITE) { 1655 DBF_DEV_EVENT(DBF_ERR, basedev, 1656 "Rejecting write request %p", 1657 req); 1658 blkdev_dequeue_request(req); 1659 dasd_end_request(req, 0); 1660 continue; 1661 } 1662 cqr = basedev->discipline->build_cp(basedev, block, req); 1663 if (IS_ERR(cqr)) { 1664 if (PTR_ERR(cqr) == -EBUSY) 1665 break; /* normal end condition */ 1666 if (PTR_ERR(cqr) == -ENOMEM) 1667 break; /* terminate request queue loop */ 1668 if (PTR_ERR(cqr) == -EAGAIN) { 1669 /* 1670 * The current request cannot be build right 1671 * now, we have to try later. If this request 1672 * is the head-of-queue we stop the device 1673 * for 1/2 second. 1674 */ 1675 if (!list_empty(&block->ccw_queue)) 1676 break; 1677 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags); 1678 basedev->stopped |= DASD_STOPPED_PENDING; 1679 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags); 1680 dasd_block_set_timer(block, HZ/2); 1681 break; 1682 } 1683 DBF_DEV_EVENT(DBF_ERR, basedev, 1684 "CCW creation failed (rc=%ld) " 1685 "on request %p", 1686 PTR_ERR(cqr), req); 1687 blkdev_dequeue_request(req); 1688 dasd_end_request(req, 0); 1689 continue; 1690 } 1691 /* 1692 * Note: callback is set to dasd_return_cqr_cb in 1693 * __dasd_block_start_head to cover erp requests as well 1694 */ 1695 cqr->callback_data = (void *) req; 1696 cqr->status = DASD_CQR_FILLED; 1697 blkdev_dequeue_request(req); 1698 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1699 dasd_profile_start(block, cqr, req); |
|
1729 } | 1700 } |
1730 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1731 dasd_schedule_bh(device); | 1701} 1702 1703static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 1704{ 1705 struct request *req; 1706 int status; 1707 1708 req = (struct request *) cqr->callback_data; 1709 dasd_profile_end(cqr->block, cqr, req); 1710 status = cqr->memdev->discipline->free_cp(cqr, req); 1711 dasd_end_request(req, status); 1712} 1713 1714/* 1715 * Process ccw request queue. 1716 */ 1717static void __dasd_process_block_ccw_queue(struct dasd_block *block, 1718 struct list_head *final_queue) 1719{ 1720 struct list_head *l, *n; 1721 struct dasd_ccw_req *cqr; 1722 dasd_erp_fn_t erp_fn; 1723 unsigned long flags; 1724 struct dasd_device *base = block->base; 1725 1726restart: 1727 /* Process request with final status. */ 1728 list_for_each_safe(l, n, &block->ccw_queue) { 1729 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 1730 if (cqr->status != DASD_CQR_DONE && 1731 cqr->status != DASD_CQR_FAILED && 1732 cqr->status != DASD_CQR_NEED_ERP && 1733 cqr->status != DASD_CQR_TERMINATED) 1734 continue; 1735 1736 if (cqr->status == DASD_CQR_TERMINATED) { 1737 base->discipline->handle_terminated_request(cqr); 1738 goto restart; 1739 } 1740 1741 /* Process requests that may be recovered */ 1742 if (cqr->status == DASD_CQR_NEED_ERP) { 1743 if (cqr->irb.esw.esw0.erw.cons && 1744 test_bit(DASD_CQR_FLAGS_USE_ERP, 1745 &cqr->flags)) { 1746 erp_fn = base->discipline->erp_action(cqr); 1747 erp_fn(cqr); 1748 } 1749 goto restart; 1750 } 1751 1752 /* First of all call extended error reporting. */ 1753 if (dasd_eer_enabled(base) && 1754 cqr->status == DASD_CQR_FAILED) { 1755 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 1756 1757 /* restart request */ 1758 cqr->status = DASD_CQR_FILLED; 1759 cqr->retries = 255; 1760 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 1761 base->stopped |= DASD_STOPPED_QUIESCE; 1762 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 1763 flags); 1764 goto restart; 1765 } 1766 1767 /* Process finished ERP request. */ 1768 if (cqr->refers) { 1769 __dasd_block_process_erp(block, cqr); 1770 goto restart; 1771 } 1772 1773 /* Rechain finished requests to final queue */ 1774 cqr->endclk = get_clock(); 1775 list_move_tail(&cqr->blocklist, final_queue); 1776 } 1777} 1778 1779static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 1780{ 1781 dasd_schedule_block_bh(cqr->block); 1782} 1783 1784static void __dasd_block_start_head(struct dasd_block *block) 1785{ 1786 struct dasd_ccw_req *cqr; 1787 1788 if (list_empty(&block->ccw_queue)) 1789 return; 1790 /* We allways begin with the first requests on the queue, as some 1791 * of previously started requests have to be enqueued on a 1792 * dasd_device again for error recovery. 
1793 */ 1794 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 1795 if (cqr->status != DASD_CQR_FILLED) 1796 continue; 1797 /* Non-temporary stop condition will trigger fail fast */ 1798 if (block->base->stopped & ~DASD_STOPPED_PENDING && 1799 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1800 (!dasd_eer_enabled(block->base))) { 1801 cqr->status = DASD_CQR_FAILED; 1802 dasd_schedule_block_bh(block); 1803 continue; 1804 } 1805 /* Don't try to start requests if device is stopped */ 1806 if (block->base->stopped) 1807 return; 1808 1809 /* just a fail safe check, should not happen */ 1810 if (!cqr->startdev) 1811 cqr->startdev = block->base; 1812 1813 /* make sure that the requests we submit find their way back */ 1814 cqr->callback = dasd_return_cqr_cb; 1815 1816 dasd_add_request_tail(cqr); 1817 } 1818} 1819 1820/* 1821 * Central dasd_block layer routine. Takes requests from the generic 1822 * block layer request queue, creates ccw requests, enqueues them on 1823 * a dasd_device and processes ccw requests that have been returned. 1824 */ 1825static void dasd_block_tasklet(struct dasd_block *block) 1826{ 1827 struct list_head final_queue; 1828 struct list_head *l, *n; 1829 struct dasd_ccw_req *cqr; 1830 1831 atomic_set(&block->tasklet_scheduled, 0); 1832 INIT_LIST_HEAD(&final_queue); 1833 spin_lock(&block->queue_lock); 1834 /* Finish off requests on ccw queue */ 1835 __dasd_process_block_ccw_queue(block, &final_queue); 1836 spin_unlock(&block->queue_lock); 1837 /* Now call the callback function of requests with final status */ 1838 spin_lock_irq(&block->request_queue_lock); 1839 list_for_each_safe(l, n, &final_queue) { 1840 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 1841 list_del_init(&cqr->blocklist); 1842 __dasd_cleanup_cqr(cqr); 1843 } 1844 spin_lock(&block->queue_lock); 1845 /* Get new request from the block device request queue */ 1846 __dasd_process_request_queue(block); 1847 /* Now check if the head of the ccw queue needs to be started. */ 1848 __dasd_block_start_head(block); 1849 spin_unlock(&block->queue_lock); 1850 spin_unlock_irq(&block->request_queue_lock); 1851 dasd_put_device(block->base); 1852} 1853 1854static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 1855{ 1856 wake_up(&dasd_flush_wq); 1857} 1858 1859/* 1860 * Go through all request on the dasd_block request queue, cancel them 1861 * on the respective dasd_device, and return them to the generic 1862 * block layer. 1863 */ 1864static int dasd_flush_block_queue(struct dasd_block *block) 1865{ 1866 struct dasd_ccw_req *cqr, *n; 1867 int rc, i; 1868 struct list_head flush_queue; 1869 1870 INIT_LIST_HEAD(&flush_queue); 1871 spin_lock_bh(&block->queue_lock); 1872 rc = 0; 1873restart: 1874 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 1875 /* if this request currently owned by a dasd_device cancel it */ 1876 if (cqr->status >= DASD_CQR_QUEUED) 1877 rc = dasd_cancel_req(cqr); 1878 if (rc < 0) 1879 break; 1880 /* Rechain request (including erp chain) so it won't be 1881 * touched by the dasd_block_tasklet anymore. 1882 * Replace the callback so we notice when the request 1883 * is returned from the dasd_device layer. 
1884 */ 1885 cqr->callback = _dasd_wake_block_flush_cb; 1886 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 1887 list_move_tail(&cqr->blocklist, &flush_queue); 1888 if (i > 1) 1889 /* moved more than one request - need to restart */ 1890 goto restart; 1891 } 1892 spin_unlock_bh(&block->queue_lock); 1893 /* Now call the callback function of flushed requests */ 1894restart_cb: 1895 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 1896 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 1897 /* Process finished ERP request. */ 1898 if (cqr->refers) { 1899 __dasd_block_process_erp(block, cqr); 1900 /* restart list_for_xx loop since dasd_process_erp 1901 * might remove multiple elements */ 1902 goto restart_cb; 1903 } 1904 /* call the callback function */ 1905 cqr->endclk = get_clock(); 1906 list_del_init(&cqr->blocklist); 1907 __dasd_cleanup_cqr(cqr); 1908 } |
1732 return rc; 1733} 1734 1735/* | 1909 return rc; 1910} 1911 1912/* |
1736 * SECTION: Block device operations (request queue, partitions, open, release). | 1913 * Schedules a call to dasd_block_tasklet over the block tasklet. |
1737 */ | 1914 */ |
1915void dasd_schedule_block_bh(struct dasd_block *block) 1916{ 1917 /* Protect against rescheduling. */ 1918 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 1919 return; 1920 /* life cycle of block is bound to it's base device */ 1921 dasd_get_device(block->base); 1922 tasklet_hi_schedule(&block->tasklet); 1923} |
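Both dasd_schedule_device_bh() and dasd_schedule_block_bh() guard against double scheduling with an atomic compare-and-exchange on tasklet_scheduled: only the caller that flips the flag from 0 to 1 takes a reference and queues the tasklet. The same guard modelled with C11 atomics (function name invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tasklet_scheduled;

/* Returns 1 for the caller that actually scheduled, 0 otherwise. */
static int schedule_bh(void)
{
        int expected = 0;

        if (!atomic_compare_exchange_strong(&tasklet_scheduled,
                                            &expected, 1))
                return 0;       /* already scheduled, nothing to do */
        /* dasd_get_device() + tasklet_hi_schedule() would go here */
        return 1;
}

int main(void)
{
        int a = schedule_bh();
        int b = schedule_bh();

        printf("%d %d\n", a, b);        /* prints 1 0 */
        return 0;
}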
|
1738 | 1924 |
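dasd_flush_block_queue() above must keep ERP chains intact: every recovery request points to the request it recovers through cqr->refers, so the flush moves the whole chain or nothing and restarts its scan whenever more than one element moved. A compilable model of walking such a chain (names invented for illustration):

#include <stdio.h>

struct cqr_model {
        const char *name;
        struct cqr_model *refers;       /* request this one recovers */
};

int main(void)
{
        struct cqr_model orig = { "original", NULL };
        struct cqr_model erp1 = { "erp1", &orig };
        struct cqr_model erp2 = { "erp2", &erp1 };
        struct cqr_model *cqr;
        int i;

        /* Mirrors: for (i = 0; cqr != NULL; cqr = cqr->refers, i++) */
        for (cqr = &erp2, i = 0; cqr != NULL; cqr = cqr->refers, i++)
                printf("moving %s\n", cqr->name);
        if (i > 1)
                puts("moved more than one request - restart the scan");
        return 0;
}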
1925 |
|
1739/* | 1926/* |
1927 * SECTION: external block device operations 1928 * (request queue handling, open, release, etc.) 1929 */ 1930 1931/* |
|
1740 * Dasd request queue function. Called from ll_rw_blk.c 1741 */ | 1932 * Dasd request queue function. Called from ll_rw_blk.c 1933 */ |
1742static void 1743do_dasd_request(struct request_queue * queue) | 1934static void do_dasd_request(struct request_queue *queue) |
1744{ | 1935{ |
1745 struct dasd_device *device; | 1936 struct dasd_block *block; |
1746 | 1937 |
1747 device = (struct dasd_device *) queue->queuedata; 1748 spin_lock(get_ccwdev_lock(device->cdev)); | 1938 block = queue->queuedata; 1939 spin_lock(&block->queue_lock); |
1749 /* Get new request from the block device request queue */ | 1940 /* Get new request from the block device request queue */ |
1750 __dasd_process_blk_queue(device); | 1941 __dasd_process_request_queue(block); |
1751 /* Now check if the head of the ccw queue needs to be started. */ | 1942 /* Now check if the head of the ccw queue needs to be started. */ |
1752 __dasd_start_head(device); 1753 spin_unlock(get_ccwdev_lock(device->cdev)); | 1943 __dasd_block_start_head(block); 1944 spin_unlock(&block->queue_lock); |
1754} 1755 1756/* 1757 * Allocate and initialize request queue and default I/O scheduler. 1758 */ | 1945} 1946 1947/* 1948 * Allocate and initialize request queue and default I/O scheduler. 1949 */ |
1759static int 1760dasd_alloc_queue(struct dasd_device * device) | 1950static int dasd_alloc_queue(struct dasd_block *block) |
1761{ 1762 int rc; 1763 | 1951{ 1952 int rc; 1953 |
1764 device->request_queue = blk_init_queue(do_dasd_request, 1765 &device->request_queue_lock); 1766 if (device->request_queue == NULL) | 1954 block->request_queue = blk_init_queue(do_dasd_request, 1955 &block->request_queue_lock); 1956 if (block->request_queue == NULL) |
1767 return -ENOMEM; 1768 | 1957 return -ENOMEM; 1958 |
1769 device->request_queue->queuedata = device; | 1959 block->request_queue->queuedata = block; |
1770 | 1960 |
1771 elevator_exit(device->request_queue->elevator); 1772 rc = elevator_init(device->request_queue, "deadline"); | 1961 elevator_exit(block->request_queue->elevator); 1962 rc = elevator_init(block->request_queue, "deadline"); |
1773 if (rc) { | 1963 if (rc) { |
1774 blk_cleanup_queue(device->request_queue); | 1964 blk_cleanup_queue(block->request_queue); |
1775 return rc; 1776 } 1777 return 0; 1778} 1779 1780/* 1781 * Allocate and initialize request queue. 1782 */ | 1965 return rc; 1966 } 1967 return 0; 1968} 1969 1970/* 1971 * Allocate and initialize request queue. 1972 */ |
1783static void 1784dasd_setup_queue(struct dasd_device * device) | 1973static void dasd_setup_queue(struct dasd_block *block) |
1785{ 1786 int max; 1787 | 1974{ 1975 int max; 1976 |
1788 blk_queue_hardsect_size(device->request_queue, device->bp_block); 1789 max = device->discipline->max_blocks << device->s2b_shift; 1790 blk_queue_max_sectors(device->request_queue, max); 1791 blk_queue_max_phys_segments(device->request_queue, -1L); 1792 blk_queue_max_hw_segments(device->request_queue, -1L); 1793 blk_queue_max_segment_size(device->request_queue, -1L); 1794 blk_queue_segment_boundary(device->request_queue, -1L); 1795 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); | 1977 blk_queue_hardsect_size(block->request_queue, block->bp_block); 1978 max = block->base->discipline->max_blocks << block->s2b_shift; 1979 blk_queue_max_sectors(block->request_queue, max); 1980 blk_queue_max_phys_segments(block->request_queue, -1L); 1981 blk_queue_max_hw_segments(block->request_queue, -1L); 1982 blk_queue_max_segment_size(block->request_queue, -1L); 1983 blk_queue_segment_boundary(block->request_queue, -1L); 1984 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL); |
1796} 1797 1798/* 1799 * Deactivate and free request queue. 1800 */ | 1985} 1986 1987/* 1988 * Deactivate and free request queue. 1989 */ |
1801static void 1802dasd_free_queue(struct dasd_device * device) | 1990static void dasd_free_queue(struct dasd_block *block) |
1803{ | 1991{ |
1804 if (device->request_queue) { 1805 blk_cleanup_queue(device->request_queue); 1806 device->request_queue = NULL; | 1992 if (block->request_queue) { 1993 blk_cleanup_queue(block->request_queue); 1994 block->request_queue = NULL; |
1807 } 1808} 1809 1810/* 1811 * Flush request on the request queue. 1812 */ | 1995 } 1996} 1997 1998/* 1999 * Flush request on the request queue. 2000 */ |
1813static void 1814dasd_flush_request_queue(struct dasd_device * device) | 2001static void dasd_flush_request_queue(struct dasd_block *block) |
1815{ 1816 struct request *req; 1817 | 2002{ 2003 struct request *req; 2004 |
1818 if (!device->request_queue) | 2005 if (!block->request_queue) |
1819 return; 1820 | 2006 return; 2007 |
1821 spin_lock_irq(&device->request_queue_lock); 1822 while ((req = elv_next_request(device->request_queue))) { | 2008 spin_lock_irq(&block->request_queue_lock); 2009 while ((req = elv_next_request(block->request_queue))) { |
1823 blkdev_dequeue_request(req); 1824 dasd_end_request(req, 0); 1825 } | 2010 blkdev_dequeue_request(req); 2011 dasd_end_request(req, 0); 2012 } |
1826 spin_unlock_irq(&device->request_queue_lock); | 2013 spin_unlock_irq(&block->request_queue_lock); |
1827} 1828 | 2014} 2015 |
1829static int 1830dasd_open(struct inode *inp, struct file *filp) | 2016static int dasd_open(struct inode *inp, struct file *filp) |
1831{ 1832 struct gendisk *disk = inp->i_bdev->bd_disk; | 2017{ 2018 struct gendisk *disk = inp->i_bdev->bd_disk; |
1833 struct dasd_device *device = disk->private_data; | 2019 struct dasd_block *block = disk->private_data; 2020 struct dasd_device *base = block->base; |
1834 int rc; 1835 | 2021 int rc; 2022 |
1836 atomic_inc(&device->open_count); 1837 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { | 2023 atomic_inc(&block->open_count); 2024 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { |
1838 rc = -ENODEV; 1839 goto unlock; 1840 } 1841 | 2025 rc = -ENODEV; 2026 goto unlock; 2027 } 2028 |
1842 if (!try_module_get(device->discipline->owner)) { | 2029 if (!try_module_get(base->discipline->owner)) { |
1843 rc = -EINVAL; 1844 goto unlock; 1845 } 1846 1847 if (dasd_probeonly) { | 2030 rc = -EINVAL; 2031 goto unlock; 2032 } 2033 2034 if (dasd_probeonly) { |
1848 DEV_MESSAGE(KERN_INFO, device, "%s", | 2035 DEV_MESSAGE(KERN_INFO, base, "%s", |
1849 "No access to device due to probeonly mode"); 1850 rc = -EPERM; 1851 goto out; 1852 } 1853 | 2036 "No access to device due to probeonly mode"); 2037 rc = -EPERM; 2038 goto out; 2039 } 2040 |
1854 if (device->state <= DASD_STATE_BASIC) { 1855 DBF_DEV_EVENT(DBF_ERR, device, " %s", | 2041 if (base->state <= DASD_STATE_BASIC) { 2042 DBF_DEV_EVENT(DBF_ERR, base, " %s", |
1856 " Cannot open unrecognized device"); 1857 rc = -ENODEV; 1858 goto out; 1859 } 1860 1861 return 0; 1862 1863out: | 2043 " Cannot open unrecognized device"); 2044 rc = -ENODEV; 2045 goto out; 2046 } 2047 2048 return 0; 2049 2050out: |
1864 module_put(device->discipline->owner); | 2051 module_put(base->discipline->owner); |
1865unlock: | 2052unlock: |
1866 atomic_dec(&device->open_count); | 2053 atomic_dec(&block->open_count); |
1867 return rc; 1868} 1869 | 2054 return rc; 2055} 2056 |
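A detail worth noting in dasd_open() above: the open count is incremented before any of the checks run, so a concurrent set_offline already sees the device as busy, and every error path has to undo the increment. A userspace model of that claim-first/roll-back pattern (error values and names are illustrative only):

#include <stdio.h>

static int open_count;

/* Claim the device first, roll back on every failed check. */
static int model_open(int offline, int probeonly)
{
        open_count++;
        if (offline) {
                open_count--;
                return -19;             /* stands in for -ENODEV */
        }
        if (probeonly) {
                open_count--;
                return -1;              /* stands in for -EPERM */
        }
        return 0;                       /* stays claimed until release */
}

int main(void)
{
        int rc;

        rc = model_open(0, 1);
        printf("rc=%d count=%d\n", rc, open_count);     /* rc=-1 count=0 */
        rc = model_open(0, 0);
        printf("rc=%d count=%d\n", rc, open_count);     /* rc=0 count=1 */
        return 0;
}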
1870static int 1871dasd_release(struct inode *inp, struct file *filp) | 2057static int dasd_release(struct inode *inp, struct file *filp) |
1872{ 1873 struct gendisk *disk = inp->i_bdev->bd_disk; | 2058{ 2059 struct gendisk *disk = inp->i_bdev->bd_disk; |
1874 struct dasd_device *device = disk->private_data; | 2060 struct dasd_block *block = disk->private_data; |
1875 | 2061 |
1876 atomic_dec(&device->open_count); 1877 module_put(device->discipline->owner); | 2062 atomic_dec(&block->open_count); 2063 module_put(block->base->discipline->owner); |
1878 return 0; 1879} 1880 1881/* 1882 * Return disk geometry. 1883 */ | 2064 return 0; 2065} 2066 2067/* 2068 * Return disk geometry. 2069 */ |
1884static int 1885dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 2070static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1886{ | 2071{ |
1887 struct dasd_device *device; | 2072 struct dasd_block *block; 2073 struct dasd_device *base; |
1888 | 2074 |
1889 device = bdev->bd_disk->private_data; 1890 if (!device) | 2075 block = bdev->bd_disk->private_data; 2076 if (!block) |
1891 return -ENODEV; 1892 | 2077 return -ENODEV; 2078 base = block->base; 2079 |
1893 if (!device->discipline || 1894 !device->discipline->fill_geometry) | 2080 if (!base->discipline || 2081 !base->discipline->fill_geometry) |
1895 return -EINVAL; 1896 | 2082 return -EINVAL; 2083 |
1897 device->discipline->fill_geometry(device, geo); 1898 geo->start = get_start_sect(bdev) >> device->s2b_shift; | 2084 base->discipline->fill_geometry(block, geo); 2085 geo->start = get_start_sect(bdev) >> block->s2b_shift; |
1899 return 0; 1900} 1901 1902struct block_device_operations 1903dasd_device_operations = { 1904 .owner = THIS_MODULE, 1905 .open = dasd_open, 1906 .release = dasd_release, 1907 .ioctl = dasd_ioctl, 1908 .compat_ioctl = dasd_compat_ioctl, 1909 .getgeo = dasd_getgeo, 1910}; 1911 | 2086 return 0; 2087} 2088 2089struct block_device_operations 2090dasd_device_operations = { 2091 .owner = THIS_MODULE, 2092 .open = dasd_open, 2093 .release = dasd_release, 2094 .ioctl = dasd_ioctl, 2095 .compat_ioctl = dasd_compat_ioctl, 2096 .getgeo = dasd_getgeo, 2097}; 2098 |
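Two conversions in this area use the same sectors-per-block shift: dasd_setup_queue() turns a discipline limit given in device blocks into 512-byte sectors (max_blocks << s2b_shift), and dasd_getgeo() turns the partition offset reported in 512-byte sectors back into device blocks (start >> s2b_shift). A worked example of both directions; the max_blocks and offset values are assumed, not taken from a real discipline:

#include <stdio.h>

int main(void)
{
        unsigned int bp_block = 4096;    /* device block size in bytes */
        unsigned int s2b_shift = 0;
        unsigned int max_blocks = 240;   /* hypothetical discipline limit */
        unsigned long start_sect = 3840; /* hypothetical partition offset */

        /* 4096 / 512 = 8 sectors per block -> shift of 3 */
        while ((512u << s2b_shift) < bp_block)
                s2b_shift++;

        printf("max sectors per request: %u\n", max_blocks << s2b_shift);
        printf("geo->start in blocks:    %lu\n", start_sect >> s2b_shift);
        return 0;                        /* prints 1920 and 480 */
}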
2099/******************************************************************************* 2100 * end of block device operations 2101 */ |
|
1912 1913static void 1914dasd_exit(void) 1915{ 1916#ifdef CONFIG_PROC_FS 1917 dasd_proc_exit(); 1918#endif 1919 dasd_eer_exit(); --- 12 unchanged lines hidden (view full) --- 1932/* 1933 * SECTION: common functions for ccw_driver use 1934 */ 1935 1936/* 1937 * Initial attempt at a probe function. this can be simplified once 1938 * the other detection code is gone. 1939 */ | 2102 2103static void 2104dasd_exit(void) 2105{ 2106#ifdef CONFIG_PROC_FS 2107 dasd_proc_exit(); 2108#endif 2109 dasd_eer_exit(); --- 12 unchanged lines hidden (view full) --- 2122/* 2123 * SECTION: common functions for ccw_driver use 2124 */ 2125 2126/* 2127 * Initial attempt at a probe function. this can be simplified once 2128 * the other detection code is gone. 2129 */ |
1940int 1941dasd_generic_probe (struct ccw_device *cdev, 1942 struct dasd_discipline *discipline) | 2130int dasd_generic_probe(struct ccw_device *cdev, 2131 struct dasd_discipline *discipline) |
1943{ 1944 int ret; 1945 1946 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 1947 if (ret) { 1948 printk(KERN_WARNING 1949 "dasd_generic_probe: could not set ccw-device options " 1950 "for %s\n", cdev->dev.bus_id); --- 23 unchanged lines hidden (view full) --- 1974 cdev->dev.bus_id, ret); 1975 return 0; 1976} 1977 1978/* 1979 * This will one day be called from a global not_oper handler. 1980 * It is also used by driver_unregister during module unload. 1981 */ | 2132{ 2133 int ret; 2134 2135 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 2136 if (ret) { 2137 printk(KERN_WARNING 2138 "dasd_generic_probe: could not set ccw-device options " 2139 "for %s\n", cdev->dev.bus_id); --- 23 unchanged lines hidden (view full) --- 2163 cdev->dev.bus_id, ret); 2164 return 0; 2165} 2166 2167/* 2168 * This will one day be called from a global not_oper handler. 2169 * It is also used by driver_unregister during module unload. 2170 */ |
1982void 1983dasd_generic_remove (struct ccw_device *cdev) | 2171void dasd_generic_remove(struct ccw_device *cdev) |
1984{ 1985 struct dasd_device *device; | 2172{ 2173 struct dasd_device *device; |
2174 struct dasd_block *block; |
|
1986 1987 cdev->handler = NULL; 1988 1989 dasd_remove_sysfs_files(cdev); 1990 device = dasd_device_from_cdev(cdev); 1991 if (IS_ERR(device)) 1992 return; 1993 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 1994 /* Already doing offline processing */ 1995 dasd_put_device(device); 1996 return; 1997 } 1998 /* 1999 * This device is removed unconditionally. Set offline 2000 * flag to prevent dasd_open from opening it while it is 2001 * not quite down yet. 2002 */ 2003 dasd_set_target_state(device, DASD_STATE_NEW); 2004 /* dasd_delete_device destroys the device reference. */ | 2175 2176 cdev->handler = NULL; 2177 2178 dasd_remove_sysfs_files(cdev); 2179 device = dasd_device_from_cdev(cdev); 2180 if (IS_ERR(device)) 2181 return; 2182 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2183 /* Already doing offline processing */ 2184 dasd_put_device(device); 2185 return; 2186 } 2187 /* 2188 * This device is removed unconditionally. Set offline 2189 * flag to prevent dasd_open from opening it while it is 2190 * not quite down yet. 2191 */ 2192 dasd_set_target_state(device, DASD_STATE_NEW); 2193 /* dasd_delete_device destroys the device reference. */ |
2194 block = device->block; 2195 device->block = NULL; |
|
2005 dasd_delete_device(device); | 2196 dasd_delete_device(device); |
2197 /* 2198 * life cycle of block is bound to device, so delete it after 2199 * device was safely removed 2200 */ 2201 if (block) 2202 dasd_free_block(block); |
|
2006} 2007 2008/* 2009 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 2010 * the device is detected for the first time and is supposed to be used 2011 * or the user has started activation through sysfs. 2012 */ | 2203} 2204 2205/* 2206 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 2207 * the device is detected for the first time and is supposed to be used 2208 * or the user has started activation through sysfs. 2209 */ |
2013int 2014dasd_generic_set_online (struct ccw_device *cdev, 2015 struct dasd_discipline *base_discipline) 2016 | 2210int dasd_generic_set_online(struct ccw_device *cdev, 2211 struct dasd_discipline *base_discipline) |
2017{ 2018 struct dasd_discipline *discipline; 2019 struct dasd_device *device; 2020 int rc; 2021 2022 /* first online clears initial online feature flag */ 2023 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 2024 device = dasd_create_device(cdev); --- 19 unchanged lines hidden (view full) --- 2044 if (!try_module_get(discipline->owner)) { 2045 module_put(base_discipline->owner); 2046 dasd_delete_device(device); 2047 return -EINVAL; 2048 } 2049 device->base_discipline = base_discipline; 2050 device->discipline = discipline; 2051 | 2212{ 2213 struct dasd_discipline *discipline; 2214 struct dasd_device *device; 2215 int rc; 2216 2217 /* first online clears initial online feature flag */ 2218 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 2219 device = dasd_create_device(cdev); --- 19 unchanged lines hidden (view full) --- 2239 if (!try_module_get(discipline->owner)) { 2240 module_put(base_discipline->owner); 2241 dasd_delete_device(device); 2242 return -EINVAL; 2243 } 2244 device->base_discipline = base_discipline; 2245 device->discipline = discipline; 2246 |
2247 /* check_device will allocate block device if necessary */ |
|
2052 rc = discipline->check_device(device); 2053 if (rc) { 2054 printk (KERN_WARNING 2055 "dasd_generic couldn't online device %s " 2056 "with discipline %s rc=%i\n", 2057 cdev->dev.bus_id, discipline->name, rc); 2058 module_put(discipline->owner); 2059 module_put(base_discipline->owner); 2060 dasd_delete_device(device); 2061 return rc; 2062 } 2063 2064 dasd_set_target_state(device, DASD_STATE_ONLINE); 2065 if (device->state <= DASD_STATE_KNOWN) { 2066 printk (KERN_WARNING 2067 "dasd_generic discipline not found for %s\n", 2068 cdev->dev.bus_id); 2069 rc = -ENODEV; 2070 dasd_set_target_state(device, DASD_STATE_NEW); | 2248 rc = discipline->check_device(device); 2249 if (rc) { 2250 printk (KERN_WARNING 2251 "dasd_generic couldn't online device %s " 2252 "with discipline %s rc=%i\n", 2253 cdev->dev.bus_id, discipline->name, rc); 2254 module_put(discipline->owner); 2255 module_put(base_discipline->owner); 2256 dasd_delete_device(device); 2257 return rc; 2258 } 2259 2260 dasd_set_target_state(device, DASD_STATE_ONLINE); 2261 if (device->state <= DASD_STATE_KNOWN) { 2262 printk (KERN_WARNING 2263 "dasd_generic discipline not found for %s\n", 2264 cdev->dev.bus_id); 2265 rc = -ENODEV; 2266 dasd_set_target_state(device, DASD_STATE_NEW); |
2267 if (device->block) 2268 dasd_free_block(device->block); |
|
2071 dasd_delete_device(device); 2072 } else 2073 pr_debug("dasd_generic device %s found\n", 2074 cdev->dev.bus_id); 2075 2076 /* FIXME: we have to wait for the root device but we don't want 2077 * to wait for each single device but for all at once. */ 2078 wait_event(dasd_init_waitq, _wait_for_device(device)); 2079 2080 dasd_put_device(device); 2081 2082 return rc; 2083} 2084 | 2269 dasd_delete_device(device); 2270 } else 2271 pr_debug("dasd_generic device %s found\n", 2272 cdev->dev.bus_id); 2273 2274 /* FIXME: we have to wait for the root device but we don't want 2275 * to wait for each single device but for all at once. */ 2276 wait_event(dasd_init_waitq, _wait_for_device(device)); 2277 2278 dasd_put_device(device); 2279 2280 return rc; 2281} 2282 |
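Taken together, dasd_generic_probe(), dasd_generic_set_online() and their offline/notify counterparts are the glue a discipline driver hangs into its struct ccw_driver. A minimal sketch of that wiring, assuming a hypothetical discipline "foo" (the dasd_foo_* names are illustrative; dasd_eckd and dasd_fba are the real users, and the .ids device table is omitted here):

	/* Hedged sketch: hypothetical discipline "foo"; all names are assumptions. */
	static struct dasd_discipline dasd_foo_discipline;	/* defined elsewhere */

	static int dasd_foo_probe(struct ccw_device *cdev)
	{
		/* hand the cdev to the common probe together with our discipline */
		return dasd_generic_probe(cdev, &dasd_foo_discipline);
	}

	static int dasd_foo_set_online(struct ccw_device *cdev)
	{
		return dasd_generic_set_online(cdev, &dasd_foo_discipline);
	}

	static struct ccw_driver dasd_foo_driver = {
		.name		= "dasd-foo",
		.owner		= THIS_MODULE,
		/* .ids omitted in this sketch */
		.probe		= dasd_foo_probe,
		.remove		= dasd_generic_remove,
		.set_online	= dasd_foo_set_online,
		.set_offline	= dasd_generic_set_offline,
		.notify		= dasd_generic_notify,
	};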
2085int 2086dasd_generic_set_offline (struct ccw_device *cdev) | 2283int dasd_generic_set_offline(struct ccw_device *cdev) |
2087{ 2088 struct dasd_device *device; | 2284{ 2285 struct dasd_device *device; |
2286 struct dasd_block *block; |
|
2089 int max_count, open_count; 2090 2091 device = dasd_device_from_cdev(cdev); 2092 if (IS_ERR(device)) 2093 return PTR_ERR(device); 2094 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2095 /* Already doing offline processing */ 2096 dasd_put_device(device); 2097 return 0; 2098 } 2099 /* 2100 * We must make sure that this device is currently not in use. 2101 * The open_count is increased for every opener, which includes 2102 * the blkdev_get in dasd_scan_partitions. We are only interested 2103 * in the other openers. 2104 */ | 2287 int max_count, open_count; 2288 2289 device = dasd_device_from_cdev(cdev); 2290 if (IS_ERR(device)) 2291 return PTR_ERR(device); 2292 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2293 /* Already doing offline processing */ 2294 dasd_put_device(device); 2295 return 0; 2296 } 2297 /* 2298 * We must make sure that this device is currently not in use. 2299 * The open_count is increased for every opener, which includes 2300 * the blkdev_get in dasd_scan_partitions. We are only interested 2301 * in the other openers. 2302 */
2105 max_count = device->bdev ? 0 : -1; 2106 open_count = (int) atomic_read(&device->open_count); 2107 if (open_count > max_count) { 2108 if (open_count > 0) 2109 printk (KERN_WARNING "Can't offline dasd device with " 2110 "open count = %i.\n", 2111 open_count); 2112 else 2113 printk (KERN_WARNING "%s", 2114 "Can't offline dasd device due to internal " 2115 "use\n"); 2116 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2117 dasd_put_device(device); 2118 return -EBUSY; | 2303 if (device->block) { 2304 struct dasd_block *block = device->block; 2305 max_count = block->bdev ? 0 : -1; 2306 open_count = (int) atomic_read(&block->open_count); 2307 if (open_count > max_count) { 2308 if (open_count > 0) 2309 printk(KERN_WARNING "Can't offline dasd " 2310 "device with open count = %i.\n", 2311 open_count); 2312 else 2313 printk(KERN_WARNING "%s", 2314 "Can't offline dasd device due " 2315 "to internal use\n"); 2316 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2317 dasd_put_device(device); 2318 return -EBUSY; 2319 } |
2119 } 2120 dasd_set_target_state(device, DASD_STATE_NEW); 2121 /* dasd_delete_device destroys the device reference. */ | 2320 } 2321 dasd_set_target_state(device, DASD_STATE_NEW); 2322 /* dasd_delete_device destroys the device reference. */ |
2323 block = device->block; 2324 device->block = NULL; |
|
2122 dasd_delete_device(device); | 2325 dasd_delete_device(device); |
2123 | 2326 /* 2327 * life cycle of block is bound to device, so delete it after 2328 * device was safely removed 2329 */ 2330 if (block) 2331 dasd_free_block(block); |
2124 return 0; 2125} 2126 | 2332 return 0; 2333} 2334 |
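In the new revision, dasd_generic_remove() and dasd_generic_set_offline() share the same teardown ordering for the split device/block structures. A hedged condensation of that pattern (this helper does not exist in dasd.c; it only illustrates the ordering, using functions visible in the diff above):

	/* Sketch only: detach the block before deleting the device,
	 * and free the block last, as both call sites above do. */
	static void dasd_generic_shutdown_sketch(struct dasd_device *device)
	{
		struct dasd_block *block;

		dasd_set_target_state(device, DASD_STATE_NEW);
		block = device->block;		/* detach so no new work reaches it */
		device->block = NULL;
		dasd_delete_device(device);	/* destroys the device reference */
		if (block)
			dasd_free_block(block);	/* block outlives the device */
	}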
2127int 2128dasd_generic_notify(struct ccw_device *cdev, int event) | 2335int dasd_generic_notify(struct ccw_device *cdev, int event) |
2129{ 2130 struct dasd_device *device; 2131 struct dasd_ccw_req *cqr; 2132 unsigned long flags; 2133 int ret; 2134 2135 device = dasd_device_from_cdev(cdev); 2136 if (IS_ERR(device)) --- 4 unchanged lines hidden (view full) --- 2141 case CIO_GONE: 2142 case CIO_NO_PATH: 2143 /* First of all call extended error reporting. */ 2144 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 2145 2146 if (device->state < DASD_STATE_BASIC) 2147 break; 2148 /* Device is active. We want to keep it. */ | 2336{ 2337 struct dasd_device *device; 2338 struct dasd_ccw_req *cqr; 2339 unsigned long flags; 2340 int ret; 2341 2342 device = dasd_device_from_cdev(cdev); 2343 if (IS_ERR(device)) --- 4 unchanged lines hidden (view full) --- 2348 case CIO_GONE: 2349 case CIO_NO_PATH: 2350 /* First of all call extended error reporting. */ 2351 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 2352 2353 if (device->state < DASD_STATE_BASIC) 2354 break; 2355 /* Device is active. We want to keep it. */ |
2149 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) { 2150 list_for_each_entry(cqr, &device->ccw_queue, list) 2151 if (cqr->status == DASD_CQR_IN_IO) 2152 cqr->status = DASD_CQR_FAILED; 2153 device->stopped |= DASD_STOPPED_DC_EIO; 2154 } else { 2155 list_for_each_entry(cqr, &device->ccw_queue, list) 2156 if (cqr->status == DASD_CQR_IN_IO) { 2157 cqr->status = DASD_CQR_QUEUED; 2158 cqr->retries++; 2159 } 2160 device->stopped |= DASD_STOPPED_DC_WAIT; 2161 dasd_set_timer(device, 0); 2162 } 2163 dasd_schedule_bh(device); | 2356 list_for_each_entry(cqr, &device->ccw_queue, devlist) 2357 if (cqr->status == DASD_CQR_IN_IO) { 2358 cqr->status = DASD_CQR_QUEUED; 2359 cqr->retries++; 2360 } 2361 device->stopped |= DASD_STOPPED_DC_WAIT; 2362 dasd_device_clear_timer(device); 2363 dasd_schedule_device_bh(device); |
2164 ret = 1; 2165 break; 2166 case CIO_OPER: 2167 /* FIXME: add a sanity check. */ | 2364 ret = 1; 2365 break; 2366 case CIO_OPER: 2367 /* FIXME: add a sanity check. */ |
2168 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO); 2169 dasd_schedule_bh(device); | 2368 device->stopped &= ~DASD_STOPPED_DC_WAIT; 2369 dasd_schedule_device_bh(device); 2370 if (device->block) 2371 dasd_schedule_block_bh(device->block); |
2170 ret = 1; 2171 break; 2172 } 2173 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2174 dasd_put_device(device); 2175 return ret; 2176} 2177 --- 13 unchanged lines hidden (view full) --- 2191 return cqr; 2192 } 2193 2194 ccw = cqr->cpaddr; 2195 ccw->cmd_code = CCW_CMD_RDC; 2196 ccw->cda = (__u32)(addr_t)rdc_buffer; 2197 ccw->count = rdc_buffer_size; 2198 | 2372 ret = 1; 2373 break; 2374 } 2375 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2376 dasd_put_device(device); 2377 return ret; 2378} 2379 --- 13 unchanged lines hidden (view full) --- 2393 return cqr; 2394 } 2395 2396 ccw = cqr->cpaddr; 2397 ccw->cmd_code = CCW_CMD_RDC; 2398 ccw->cda = (__u32)(addr_t)rdc_buffer; 2399 ccw->count = rdc_buffer_size; 2400 |
2199 cqr->device = device; | 2401 cqr->startdev = device; 2402 cqr->memdev = device; |
2200 cqr->expires = 10*HZ; 2201 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2202 cqr->retries = 2; 2203 cqr->buildclk = get_clock(); 2204 cqr->status = DASD_CQR_FILLED; 2205 return cqr; 2206} 2207 --- 5 unchanged lines hidden (view full) --- 2213 struct dasd_ccw_req *cqr; 2214 2215 cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size, 2216 magic); 2217 if (IS_ERR(cqr)) 2218 return PTR_ERR(cqr); 2219 2220 ret = dasd_sleep_on(cqr); | 2403 cqr->expires = 10*HZ; 2404 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2405 cqr->retries = 2; 2406 cqr->buildclk = get_clock(); 2407 cqr->status = DASD_CQR_FILLED; 2408 return cqr; 2409} 2410 --- 5 unchanged lines hidden (view full) --- 2416 struct dasd_ccw_req *cqr; 2417 2418 cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size, 2419 magic); 2420 if (IS_ERR(cqr)) 2421 return PTR_ERR(cqr); 2422 2423 ret = dasd_sleep_on(cqr); |
2221 dasd_sfree_request(cqr, cqr->device); | 2424 dasd_sfree_request(cqr, cqr->memdev); |
2222 return ret; 2223} 2224EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2225 | 2425 return ret; 2426} 2427EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2428 |
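The startdev/memdev split visible above matters to callers of this helper: the request memory is owned by cqr->memdev, so dasd_generic_read_dev_chars() frees the cqr against that device after dasd_sleep_on() returns. A usage sketch, assuming the argument order implied by the dasd_generic_build_rdc() call above and an illustrative "FOO " magic:

	/* Hypothetical caller; magic, buffer size and all names are assumptions. */
	static int dasd_foo_read_characteristics(struct dasd_device *device,
						 void *buffer)
	{
		void *rdc = buffer;

		/* blocks in dasd_sleep_on(); the internal cqr is freed
		 * against cqr->memdev before this returns */
		return dasd_generic_read_dev_chars(device, "FOO ", &rdc, 64);
	}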
2226static int __init 2227dasd_init(void) | 2429static int __init dasd_init(void) |
2228{ 2229 int rc; 2230 2231 init_waitqueue_head(&dasd_init_waitq); 2232 init_waitqueue_head(&dasd_flush_wq); 2233 2234 /* register 'common' DASD debug area, used for all DBF_XXX calls */ | 2430{ 2431 int rc; 2432 2433 init_waitqueue_head(&dasd_init_waitq); 2434 init_waitqueue_head(&dasd_flush_wq); 2435 2436 /* register 'common' DASD debug area, used for all DBF_XXX calls */ |
2235 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); | 2437 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long)); |
2236 if (dasd_debug_area == NULL) { 2237 rc = -ENOMEM; 2238 goto failed; 2239 } 2240 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2241 debug_set_level(dasd_debug_area, DBF_WARNING); 2242 2243 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); --- 29 unchanged lines hidden (view full) --- 2273module_exit(dasd_exit); 2274 2275EXPORT_SYMBOL(dasd_debug_area); 2276EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2277 2278EXPORT_SYMBOL(dasd_add_request_head); 2279EXPORT_SYMBOL(dasd_add_request_tail); 2280EXPORT_SYMBOL(dasd_cancel_req); | 2438 if (dasd_debug_area == NULL) { 2439 rc = -ENOMEM; 2440 goto failed; 2441 } 2442 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2443 debug_set_level(dasd_debug_area, DBF_WARNING); 2444 2445 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); --- 29 unchanged lines hidden (view full) --- 2475module_exit(dasd_exit); 2476 2477EXPORT_SYMBOL(dasd_debug_area); 2478EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2479 2480EXPORT_SYMBOL(dasd_add_request_head); 2481EXPORT_SYMBOL(dasd_add_request_tail); 2482EXPORT_SYMBOL(dasd_cancel_req); |
2281EXPORT_SYMBOL(dasd_clear_timer); | 2483EXPORT_SYMBOL(dasd_device_clear_timer); 2484EXPORT_SYMBOL(dasd_block_clear_timer); |
2282EXPORT_SYMBOL(dasd_enable_device); 2283EXPORT_SYMBOL(dasd_int_handler); 2284EXPORT_SYMBOL(dasd_kfree_request); 2285EXPORT_SYMBOL(dasd_kick_device); 2286EXPORT_SYMBOL(dasd_kmalloc_request); | 2485EXPORT_SYMBOL(dasd_enable_device); 2486EXPORT_SYMBOL(dasd_int_handler); 2487EXPORT_SYMBOL(dasd_kfree_request); 2488EXPORT_SYMBOL(dasd_kick_device); 2489EXPORT_SYMBOL(dasd_kmalloc_request); |
2287EXPORT_SYMBOL(dasd_schedule_bh); | 2490EXPORT_SYMBOL(dasd_schedule_device_bh); 2491EXPORT_SYMBOL(dasd_schedule_block_bh); |
2288EXPORT_SYMBOL(dasd_set_target_state); | 2492EXPORT_SYMBOL(dasd_set_target_state); |
2289EXPORT_SYMBOL(dasd_set_timer); | 2493EXPORT_SYMBOL(dasd_device_set_timer); 2494EXPORT_SYMBOL(dasd_block_set_timer); |
2290EXPORT_SYMBOL(dasd_sfree_request); 2291EXPORT_SYMBOL(dasd_sleep_on); 2292EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2293EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2294EXPORT_SYMBOL(dasd_smalloc_request); 2295EXPORT_SYMBOL(dasd_start_IO); 2296EXPORT_SYMBOL(dasd_term_IO); 2297 2298EXPORT_SYMBOL_GPL(dasd_generic_probe); 2299EXPORT_SYMBOL_GPL(dasd_generic_remove); 2300EXPORT_SYMBOL_GPL(dasd_generic_notify); 2301EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2302EXPORT_SYMBOL_GPL(dasd_generic_set_offline); | 2495EXPORT_SYMBOL(dasd_sfree_request); 2496EXPORT_SYMBOL(dasd_sleep_on); 2497EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2498EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2499EXPORT_SYMBOL(dasd_smalloc_request); 2500EXPORT_SYMBOL(dasd_start_IO); 2501EXPORT_SYMBOL(dasd_term_IO); 2502 2503EXPORT_SYMBOL_GPL(dasd_generic_probe); 2504EXPORT_SYMBOL_GPL(dasd_generic_remove); 2505EXPORT_SYMBOL_GPL(dasd_generic_notify); 2506EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2507EXPORT_SYMBOL_GPL(dasd_generic_set_offline); |
2303 | 2508EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 2509EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2510EXPORT_SYMBOL_GPL(dasd_alloc_block); 2511EXPORT_SYMBOL_GPL(dasd_free_block); |
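The newly exported dasd_alloc_block()/dasd_free_block() pair is the allocation-side counterpart of the teardown sketched earlier: per the "check_device will allocate block device if necessary" comment above, a discipline's check_device() callback creates the block object for devices that will carry a block device. A minimal sketch, assuming ERR_PTR-style failure from dasd_alloc_block() and a base back pointer in struct dasd_block (both are assumptions):

	/* Hypothetical discipline callback, not code from this file. */
	static int dasd_foo_check_device(struct dasd_device *device)
	{
		struct dasd_block *block;

		block = dasd_alloc_block();	/* assumed to return ERR_PTR on failure */
		if (IS_ERR(block))
			return PTR_ERR(block);
		device->block = block;		/* bind block to device ... */
		block->base = device;		/* ... and back (field name assumed) */
		return 0;
	}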