/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>

#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	while (length > 0) {
		debug_event(chsc_debug_log_id, level, data, length);
		length -= chsc_debug_log_id->buf_size;
		data += chsc_debug_log_id->buf_size;
	}
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = (struct irb *)&S390_lowcore.irb;

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	dev_set_drvdata(&sch->dev, private);
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		dev_set_drvdata(&sch->dev, NULL);
		kfree(private);
	} else {
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}

static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}
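/*
 * Power management callbacks: prepare() refuses to suspend while a request
 * is still in flight, freeze() disables the subchannel, and thaw()/restore()
 * re-enable it on resume.
 */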
static int chsc_subchannel_prepare(struct subchannel *sch)
{
	int cc;
	struct schib schib;
	/*
	 * Don't allow suspend while the subchannel is not idle
	 * since we don't have a way to clear the subchannel and
	 * cannot disable it with a request running.
	 */
	cc = stsch_err(sch->schid, &schib);
	if (!cc && scsw_stctl(&schib.scsw))
		return -EAGAIN;
	return 0;
}

static int chsc_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int chsc_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "chsc_subchannel",
	},
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
	.prepare = chsc_subchannel_prepare,
	.freeze = chsc_subchannel_freeze,
	.thaw = chsc_subchannel_restore,
	.restore = chsc_subchannel_restore,
};

static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
					   16 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	if (chsc_debug_msg_id)
		debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}

static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}

static DEFINE_SPINLOCK(chsc_lock);
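/*
 * Find the next chsc subchannel that is enabled and has no function pending,
 * so that a new request can be started on it.  chsc_async() uses this to
 * walk all subchannels bound to this driver until one accepts the request.
 */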
static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}

/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = dev_get_drvdata(&sch->dev);
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		sprintf(dbf, "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}

static void chsc_log_command(struct chsc_async_area *chsc_area)
{
	char dbf[10];

	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}
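/*
 * CHSC_START: copy the caller's command block into a DMA-capable page,
 * start it on a free chsc subchannel and, if it completes asynchronously,
 * wait for the interrupt before copying the result back to user space.
 */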
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	sprintf(dbf, "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}
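/*
 * CHSC_INFO_CU: issue a CHSC to retrieve the description of the control
 * unit selected by the caller and copy the response block back to user
 * space.
 */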
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0028;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}
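/*
 * CHSC_INFO_CCL: build the list parameters (a channel path or a cssid
 * range, depending on the requested list type) and return the component
 * list reported by the machine.
 */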
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}

static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}
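/*
 * ioctl entry point for the chsc misc device.  User space passes one of
 * the CHSC_* ioctl numbers defined in <asm/chsc.h> together with a pointer
 * to the corresponding request/response structure; a rough sketch
 * (assuming the device node appears as /dev/chsc):
 *
 *	fd = open("/dev/chsc", O_RDWR);
 *	ioctl(fd, CHSC_START, chsc_area);
 *
 * Pointer arguments from 31-bit compat tasks are converted with
 * compat_ptr().
 */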
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);