// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>

#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

static struct chsc_request *on_close_request;
static struct chsc_async_area *on_close_chsc_area;
static DEFINE_MUTEX(on_close_mutex);

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	debug_event(chsc_debug_log_id, level, data, length);
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = this_cpu_ptr(&cio_irb);

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	inc_irq_stat(IRQIO_CSC);

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	dev_set_drvdata(&sch->dev, private);
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		dev_set_drvdata(&sch->dev, NULL);
		kfree(private);
	} else {
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}

static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
.name = "chsc_subchannel", 133 }, 134 .subchannel_type = chsc_subchannel_ids, 135 .irq = chsc_subchannel_irq, 136 .probe = chsc_subchannel_probe, 137 .remove = chsc_subchannel_remove, 138 .shutdown = chsc_subchannel_shutdown, 139 }; 140 141 static int __init chsc_init_dbfs(void) 142 { 143 chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); 144 if (!chsc_debug_msg_id) 145 goto out; 146 debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); 147 debug_set_level(chsc_debug_msg_id, 2); 148 chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); 149 if (!chsc_debug_log_id) 150 goto out; 151 debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); 152 debug_set_level(chsc_debug_log_id, 2); 153 return 0; 154 out: 155 debug_unregister(chsc_debug_msg_id); 156 return -ENOMEM; 157 } 158 159 static void chsc_remove_dbfs(void) 160 { 161 debug_unregister(chsc_debug_log_id); 162 debug_unregister(chsc_debug_msg_id); 163 } 164 165 static int __init chsc_init_sch_driver(void) 166 { 167 return css_driver_register(&chsc_subchannel_driver); 168 } 169 170 static void chsc_cleanup_sch_driver(void) 171 { 172 css_driver_unregister(&chsc_subchannel_driver); 173 } 174 175 static DEFINE_SPINLOCK(chsc_lock); 176 177 static int chsc_subchannel_match_next_free(struct device *dev, const void *data) 178 { 179 struct subchannel *sch = to_subchannel(dev); 180 181 return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); 182 } 183 184 static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) 185 { 186 struct device *dev; 187 188 dev = driver_find_device(&chsc_subchannel_driver.drv, 189 sch ? &sch->dev : NULL, NULL, 190 chsc_subchannel_match_next_free); 191 return dev ? to_subchannel(dev) : NULL; 192 } 193 194 /** 195 * chsc_async() - try to start a chsc request asynchronously 196 * @chsc_area: request to be started 197 * @request: request structure to associate 198 * 199 * Tries to start a chsc request on one of the existing chsc subchannels. 
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = dev_get_drvdata(&sch->dev);
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}

static void chsc_log_command(void *chsc_area)
{
	char dbf[10];

	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}

static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

static int chsc_ioctl_on_close_set(void __user *user_area)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (on_close_chsc_area) {
		ret = -EBUSY;
		goto out_unlock;
	}
	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
	if (!on_close_request) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!on_close_chsc_area) {
		ret = -ENOMEM;
		goto out_free_request;
	}
	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free_chsc;
	}
	ret = 0;
	goto out_unlock;

out_free_chsc:
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
out_free_request:
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

static int chsc_ioctl_on_close_remove(void)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area) {
		ret = -ENOENT;
		goto out_unlock;
	}
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
	ret = 0;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

static int chsc_ioctl_start_sync(void __user *user_area)
{
	struct chsc_sync_area *chsc_area;
	int ret, ccode;

	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!chsc_area)
		return -ENOMEM;
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	if (chsc_area->header.code & 0x4000) {
		ret = -EINVAL;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	ccode = chsc(chsc_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	free_page((unsigned long)chsc_area);
	return ret;
}

static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}

static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0026;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}

static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}

static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}

static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_START_SYNC:
		return chsc_ioctl_start_sync(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	case CHSC_ON_CLOSE_SET:
		return chsc_ioctl_on_close_set(argp);
	case CHSC_ON_CLOSE_REMOVE:
		return chsc_ioctl_on_close_remove();
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);

static int chsc_open(struct inode *inode, struct file *file)
{
	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
		atomic_inc(&chsc_ready_for_use);
		return -EBUSY;
	}
	return nonseekable_open(inode, file);
}

static int chsc_release(struct inode *inode, struct file *filp)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area)
		goto out_unlock;
	init_completion(&on_close_request->completion);
	CHSC_LOG(0, "on_close");
	chsc_log_command(on_close_chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(on_close_chsc_area, on_close_request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&on_close_request->completion);
		ret = chsc_examine_irb(on_close_request);
	}
	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
	CHSC_LOG(0, dbf);
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	atomic_inc(&chsc_ready_for_use);
	return 0;
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = chsc_open,
	.release = chsc_release,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);