/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 * $Revision: 1.85 $
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

unsigned int highest_subchannel;
int need_rescan = 0;
int css_init_done = 0;

struct pgid global_pgid;
int css_characteristics_avail = 0;

struct device css_bus_device = {
	.bus_id = "css0",
};

static struct subchannel *
css_alloc_subchannel(int irq)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, irq);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	if (irq > highest_subchannel)
		highest_subchannel = irq;

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->irq))
		kfree(sch);
}

extern int css_get_ssd_info(struct subchannel *sch);

static int
css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css_bus_device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;

	/* make it known to the system */
	ret = device_register(&sch->dev);
	if (ret)
		printk(KERN_WARNING "%s: could not register %s\n",
		       __func__, sch->dev.bus_id);
	else
		css_get_ssd_info(sch);
	return ret;
}

int
css_probe_device(int irq)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(irq);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

struct subchannel *
get_subchannel_by_schid(int irq)
{
	struct subchannel *sch;
	struct list_head *entry;
	struct device *dev;

	if (!get_bus(&css_bus_type))
		return NULL;
	down_read(&css_bus_type.subsys.rwsem);
	sch = NULL;
	list_for_each(entry, &css_bus_type.devices.list) {
		dev = get_device(container_of(entry,
					      struct device, bus_list));
		if (!dev)
			continue;
		sch = to_subchannel(dev);
		if (sch->irq == irq)
			break;
		put_device(dev);
		sch = NULL;
	}
	up_read(&css_bus_type.subsys.rwsem);
	put_bus(&css_bus_type);

	return sch;
}
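/*
 * Classify what a machine check means for a subchannel: CIO_GONE if
 * stsch() fails or the device number is no longer valid, CIO_REVALIDATE
 * if the device number changed behind our back, CIO_NO_PATH if the
 * subchannel is known but no path (lpm) is left to it, and CIO_OPER
 * otherwise.
 */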
static inline int
css_get_subchannel_status(struct subchannel *sch, int schid)
{
	struct schib schib;
	int cc;

	cc = stsch(schid, &schib);
	if (cc)
		return CIO_GONE;
	if (!schib.pmcw.dnv)
		return CIO_GONE;
	if (sch && sch->schib.pmcw.dnv &&
	    (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (sch && !sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

static int
css_evaluate_subchannel(int irq, int slow)
{
	int event, ret, disc;
	struct subchannel *sch;
	unsigned long flags;

	sch = get_subchannel_by_schid(irq);
	disc = sch ? device_is_disconnected(sch) : 0;
	if (disc && slow) {
		if (sch)
			put_device(&sch->dev);
		return 0; /* Already processed. */
	}
	/*
	 * We've got a machine check, so running I/O won't get an interrupt.
	 * Kill any pending timers.
	 */
	if (sch)
		device_kill_pending_timer(sch);
	if (!disc && !slow) {
		if (sch)
			put_device(&sch->dev);
		return -EAGAIN; /* Will be done on the slow path. */
	}
	event = css_get_subchannel_status(sch, irq);
	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
		      irq, event, sch ? (disc ? "disconnected" : "normal") :
		      "unknown", slow ? "slow" : "fast");
	switch (event) {
	case CIO_NO_PATH:
	case CIO_GONE:
		if (!sch) {
			/* Never used this subchannel. Ignore. */
			ret = 0;
			break;
		}
		if (disc && (event == CIO_NO_PATH)) {
			/*
			 * Uargh, hack again. Because we don't get a machine
			 * check on configure on, our path bookkeeping can
			 * be out of date here (it's fine while we only do
			 * logical varying or get chsc machine checks). We
			 * need to force reprobing or we might miss devices
			 * coming operational again. It won't do harm in real
			 * no-path situations.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
			break;
		}
		if (sch->driver && sch->driver->notify &&
		    sch->driver->notify(&sch->dev, event)) {
			cio_disable_subchannel(sch);
			device_set_disconnected(sch);
			ret = 0;
			break;
		}
		/*
		 * Unregister subchannel.
		 * The device will be killed automatically.
		 */
		cio_disable_subchannel(sch);
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		ret = 0;
		break;
	case CIO_REVALIDATE:
		/*
		 * Revalidation machine check. Sick.
		 * We don't notify the driver since we have to throw the device
		 * away in any case.
		 */
		if (!disc) {
			device_unregister(&sch->dev);
			/* Reset intparm to zeroes. */
			sch->schib.pmcw.intparm = 0;
			cio_modify(sch);
			put_device(&sch->dev);
			ret = css_probe_device(irq);
		} else {
			/*
			 * We can't immediately deregister the disconnected
			 * device since it might block.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
		}
		break;
	case CIO_OPER:
		if (disc) {
			spin_lock_irqsave(&sch->lock, flags);
			/* Get device operational again. */
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
		}
		ret = sch ? 0 : css_probe_device(irq);
		break;
	default:
		BUG();
		ret = 0;
	}
	return ret;
}
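/*
 * For illustration, a minimal sketch of the ->notify callback consumed
 * above (driver and function names are hypothetical). Returning nonzero
 * asks the common code to keep the device in disconnected state instead
 * of unregistering it, e.g. when merely all paths are gone:
 *
 *	static int
 *	example_drv_notify(struct device *dev, int event)
 *	{
 *		return event == CIO_NO_PATH;
 *	}
 */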
static void
css_rescan_devices(void)
{
	int irq, ret;

	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		ret = css_evaluate_subchannel(irq, 1);
		/*
		 * No more memory. It doesn't make sense to continue. No
		 * panic because this can happen in midflight and just
		 * because we can't use a new device is no reason to crash
		 * the system.
		 */
		if (ret == -ENOMEM)
			break;
		/* -ENXIO indicates that there are no more subchannels. */
		if (ret == -ENXIO)
			break;
	}
}

struct slow_subchannel {
	struct list_head slow_list;
	unsigned long schid;
};

static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);

static void
css_trigger_slow_path(void)
{
	CIO_TRACE_EVENT(4, "slowpath");

	if (need_rescan) {
		need_rescan = 0;
		css_rescan_devices();
		return;
	}

	spin_lock_irq(&slow_subchannel_lock);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(slow_sch->schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
		kfree(slow_sch);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}

typedef void (*workfunc)(void *);
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;

/*
 * Rescan for new devices. FIXME: This is slow.
 * This function is called when we have lost CRWs due to overflows and we have
 * to do subchannel housekeeping.
 */
void
css_reiterate_subchannels(void)
{
	css_clear_subchannel_slow_list();
	need_rescan = 1;
}

/*
 * Called from the machine check handler for subchannel report words.
 */
int
css_process_crw(int irq)
{
	int ret;

	CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);

	if (need_rescan)
		/* We need to iterate all subchannels anyway. */
		return -EAGAIN;
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	ret = css_evaluate_subchannel(irq, 0);
	if (ret == -EAGAIN) {
		if (css_enqueue_subchannel_slow(irq)) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
	return ret;
}
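/*
 * How the slow path is expected to be kicked off (a sketch; the actual
 * call sites live outside this file): once css_process_crw() has
 * returned -EAGAIN or need_rescan has been set, the machine check
 * handling code would do something like
 *
 *	if (css_slow_subchannels_exist() || need_rescan)
 *		queue_work(slow_path_wq, &slow_path_work);
 */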
static void __init
css_generate_pgid(void)
{
	/* Let's build our path group ID here. */
	if (css_characteristics_avail && css_general_characteristics.mcss)
		global_pgid.cpu_addr = 0x8000;
	else {
#ifdef CONFIG_SMP
		global_pgid.cpu_addr = hard_smp_processor_id();
#else
		global_pgid.cpu_addr = 0;
#endif
	}
	global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	global_pgid.tod_high = (__u32) (get_clock() >> 32);
}

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannels are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, irq;

	if (chsc_determine_css_characteristics() == 0)
		css_characteristics_avail = 1;

	css_generate_pgid();

	if ((ret = bus_register(&css_bus_type)))
		goto out;
	if ((ret = device_register(&css_bus_device)))
		goto out_bus;

	css_init_done = 1;

	ctl_set_bit(6, 28);

	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct subchannel *sch;

		if (cio_is_console(irq))
			sch = cio_get_console_subchannel();
		else {
			sch = css_alloc_subchannel(irq);
			if (IS_ERR(sch))
				ret = PTR_ERR(sch);
			else
				ret = 0;
			if (ret == -ENOMEM)
				panic("Out of memory in "
				      "init_channel_subsystem\n");
			/* -ENXIO: no more subchannels. */
			if (ret == -ENXIO)
				break;
			if (ret)
				continue;
		}
		/*
		 * We register ALL valid subchannels in ioinfo, even those
		 * that have been present before init_channel_subsystem.
		 * These subchannels can't have been registered yet (kmalloc
		 * not working) so we do it now. This is true e.g. for the
		 * console subchannel.
		 */
		css_register_subchannel(sch);
	}
	return 0;

out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}

/*
 * Find a driver for a subchannel. Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its
 * own subchannel type although the device is an I/O subchannel.
 */
static int
css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = container_of(dev, struct subchannel, dev);
	struct css_driver *driver = container_of(drv, struct css_driver, drv);

	if (sch->st == driver->subchannel_type)
		return 1;

	return 0;
}

struct bus_type css_bus_type = {
	.name  = "css",
	.match = &css_bus_match,
};

subsys_initcall(init_channel_subsystem);
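/*
 * A sketch of how a subchannel driver hooks into this bus (all names
 * hypothetical): the driver embeds a struct device_driver and is
 * matched against subchannels by css_bus_match() via its subchannel
 * type.
 *
 *	static struct css_driver example_driver = {
 *		.subchannel_type = SUBCHANNEL_TYPE_IO,
 *		.drv = {
 *			.name = "example",
 *			.bus  = &css_bus_type,
 *		},
 *	};
 *
 *	driver_register(&example_driver.drv);
 */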
/*
 * Register root devices for some drivers. The release function must not be
 * in the device drivers, so we do it here.
 */
static void
s390_root_dev_release(struct device *dev)
{
	kfree(dev);
}

struct device *
s390_root_dev_register(const char *name)
{
	struct device *dev;
	int ret;

	if (!strlen(name))
		return ERR_PTR(-EINVAL);
	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	memset(dev, 0, sizeof(struct device));
	/* Truncate overlong names but keep bus_id NUL-terminated. */
	strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE - 1));
	dev->release = s390_root_dev_release;
	ret = device_register(dev);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}
	return dev;
}

void
s390_root_dev_unregister(struct device *dev)
{
	if (dev)
		device_unregister(dev);
}

int
css_enqueue_subchannel_slow(unsigned long schid)
{
	struct slow_subchannel *new_slow_sch;
	unsigned long flags;

	new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
	if (!new_slow_sch)
		return -ENOMEM;
	memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
	new_slow_sch->schid = schid;
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	return 0;
}

void
css_clear_subchannel_slow_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		kfree(slow_sch);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

int
css_slow_subchannels_exist(void)
{
	return (!list_empty(&slow_subchannels_head));
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
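/*
 * Usage sketch for the exported root device helpers (caller and device
 * name are hypothetical): a driver creates a pseudo parent device and
 * later tears it down through s390_root_dev_unregister(), so that the
 * release function stays in this module.
 *
 *	struct device *dev;
 *
 *	dev = s390_root_dev_register("example");
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	s390_root_dev_unregister(dev);
 */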