/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>

#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp, ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp, txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	while (length > 0) {
		debug_event(chsc_debug_log_id, level, data, length);
		length -= chsc_debug_log_id->buf_size;
		data += chsc_debug_log_id->buf_size;
	}
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = sch->private;
	struct chsc_request *request = private->request;
	struct irb *irb = (struct irb *)&S390_lowcore.irb;

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		kfree(private);
	} else {
		sch->private = private;
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}

static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = sch->private;
	sch->private = NULL;
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}
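/*
 * Power management callbacks. A chsc subchannel carries no state worth
 * saving: freeze simply disables the subchannel and thaw/restore
 * re-enable it. Suspend is refused while a request is still in flight,
 * see the comment in chsc_subchannel_prepare() below.
 */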
static int chsc_subchannel_prepare(struct subchannel *sch)
{
	int cc;
	struct schib schib;
	/*
	 * Don't allow suspend while the subchannel is not idle
	 * since we don't have a way to clear the subchannel and
	 * cannot disable it with a request running.
	 */
	cc = stsch_err(sch->schid, &schib);
	if (!cc && scsw_stctl(&schib.scsw))
		return -EAGAIN;
	return 0;
}

static int chsc_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int chsc_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
	.prepare = chsc_subchannel_prepare,
	.freeze = chsc_subchannel_freeze,
	.thaw = chsc_subchannel_restore,
	.restore = chsc_subchannel_restore,
	.name = "chsc_subchannel",
};

static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
					   16 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	if (chsc_debug_msg_id)
		debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}

static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}

static DEFINE_SPINLOCK(chsc_lock);

static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}
/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = sch->private;
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		sprintf(dbf, "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}

static void chsc_log_command(struct chsc_async_area *chsc_area)
{
	char dbf[10];

	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}
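/*
 * The ioctl handlers below back the chsc misc device. They all follow
 * the same pattern: allocate a zeroed, DMA-capable page for the command
 * area, copy the request in from user space, build the CHSC command
 * block, issue the command and, on success, copy the response block
 * back to user space. chsc_ioctl_start() may run its command
 * asynchronously via chsc_async(); the remaining handlers call chsc()
 * synchronously.
 */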
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	sprintf(dbf, "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0028;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}
static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}

static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);
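/*
 * User-space usage sketch (illustrative only; it assumes CHSC_START and
 * struct chsc_async_area are exported to user space via <asm/chsc.h>,
 * matching the definitions used above):
 *
 *	struct chsc_async_area *area;
 *	int fd = open("/dev/chsc", O_RDWR);
 *
 *	area = ...;			// page-sized, zeroed buffer
 *	area->header.code = ...;	// CHSC command code to issue
 *	area->header.length = ...;	// length of the request block
 *	if (ioctl(fd, CHSC_START, area) == 0)
 *		...;			// area now holds the response
 *
 * chsc_async() fills in header.key and header.sid itself, so a caller
 * only provides the command-specific parts of the request block.
 */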