/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original source:
 *		Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000
 *		Xenia Tkatschow (xenia@us.ibm.com)
 *	2Gb awareness and general cleanup:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Rewritten for af_iucv:
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	PM functions:
 *		Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *	The original source
 *	CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
static enum iucv_pm_states iucv_pm_state;

static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of pointers to iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_allow_cpu and iucv_block_cpu_almost.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
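
/*
 * Illustrative sketch (compiled out, not part of the original driver):
 * every member of union iucv_param maps the same 40-byte CP parameter
 * list, which must be 8-byte aligned and allocated below 2GB (GFP_DMA).
 * A compile-time check along these lines would make that layout
 * assumption explicit; iucv_check_param_layout is a hypothetical name.
 */
#if 0
static inline void iucv_check_param_layout(void)
{
	BUILD_BUG_ON(sizeof(union iucv_param) != 40);
	BUILD_BUG_ON(__alignof__(union iucv_param) != 8);
}
#endif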

/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = (unsigned long)parm;
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return ccode;
}

static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	int ccode;

	ccode = __iucv_call_b2f0(command, parm);
	return ccode == 1 ? parm->ctrl.iprcode : ccode;
}

/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns the maximum number of connections or -EPERM if IUCV is not
 * available.
 */
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	*max_pathid = reg1;
	return ccode;
}

static int iucv_query_maxconn(void)
{
	unsigned long max_pathid;
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
	if (!param)
		return -ENOMEM;
	ccode = __iucv_query_maxconn(param, &max_pathid);
	if (ccode == 0)
		iucv_max_pathid = max_pathid;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}

/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	get_online_cpus();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}

/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}

static int iucv_cpu_dead(unsigned int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
	return 0;
}

static int iucv_cpu_prepare(unsigned int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					   GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	iucv_cpu_dead(cpu);
	return -ENOMEM;
}

static int iucv_cpu_online(unsigned int cpu)
{
	if (!iucv_path_table)
		return 0;
	iucv_declare_cpu(NULL);
	return 0;
}

static int iucv_cpu_down_prep(unsigned int cpu)
{
	cpumask_t cpumask;

	if (!iucv_path_table)
		return 0;

	cpumask_copy(&cpumask, &iucv_buffer_cpumask);
	cpumask_clear_cpu(cpu, &cpumask);
	if (cpumask_empty(&cpumask))
		/* Can't offline last IUCV enabled cpu. */
		return -EINVAL;

	iucv_retrieve_cpu(NULL);
	if (!cpumask_empty(&iucv_irq_cpumask))
		return 0;
	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
				 iucv_allow_cpu, NULL, 1);
	return 0;
}

/**
 * iucv_sever_pathid
 * @pathid: path identification number.
 * @userdata: 16 bytes of user data.
 *
 * Sever an iucv path to free up the pathid. Used internally.
 */
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on a iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/**
 * iucv_register:
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Registers a driver with IUCV.
 *
 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);
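
/*
 * Illustrative sketch (compiled out): a minimal IUCV user as it might
 * register itself with this interface. All example_* names are made up;
 * only iucv_register and iucv_unregister are part of this file.
 */
#if 0
static void example_path_complete(struct iucv_path *path, u8 *ipuser)
{
	/* The path is now usable for iucv_message_send and friends. */
}

static struct iucv_handler example_handler = {
	.path_complete	= example_path_complete,
};

static int __init example_init(void)
{
	/* smp != 0: callbacks may run on any cpu that takes the interrupt. */
	return iucv_register(&example_handler, 1);
}

static void __exit example_exit(void)
{
	iucv_unregister(&example_handler, 1);
}
#endif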

/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregisters a driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);

static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	get_online_cpus();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	put_online_cpus();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 *userdata, void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);
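
/*
 * Illustrative sketch (compiled out): completing the handshake from a
 * path_pending callback. Returning 0 keeps ownership of the path;
 * example_handler is the hypothetical handler from the sketch above.
 */
#if 0
static int example_path_pending(struct iucv_path *path, u8 *ipvmid,
				u8 *ipuser)
{
	/* Optionally lower the message limit before accepting. */
	path->msglim = min_t(u16, path->msglim, 8);
	return iucv_path_accept(path, &example_handler, NULL, NULL);
}
#endif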

/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 *userid, u8 *system, u8 *userdata,
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);

/**
 * iucv_path_quiesce:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking iucv_path_resume.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);
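
/*
 * Illustrative sketch (compiled out): actively connecting to a partner.
 * "LNXPEER " is a made-up 8-character z/VM user id; the path only becomes
 * usable once the connection complete interrupt has been delivered.
 */
#if 0
static struct iucv_path *example_connect(struct iucv_handler *handler)
{
	struct iucv_path *path;

	path = iucv_path_alloc(8, IUCV_IPRMDATA, GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);
	if (iucv_path_connect(path, handler, (u8 *)"LNXPEER ", NULL,
			      NULL, NULL)) {
		iucv_path_free(path);
		return ERR_PTR(-EIO);
	}
	return path;
}
#endif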

/**
 * iucv_path_resume:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/**
 * iucv_path_sever
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function terminates an IUCV path.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

/**
 * iucv_message_purge
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Cancels a message you have sent.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);
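
/*
 * Illustrative sketch (compiled out): typical cleanup in a path_severed
 * callback. When a handler provides path_severed, the core does not free
 * the path itself, so the callback severs its end and releases it.
 */
#if 0
static void example_path_severed(struct iucv_path *path, u8 *ipuser)
{
	iucv_path_sever(path, NULL);
	iucv_path_free(path);
}
#endif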

/**
 * iucv_message_receive_iprmdata
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: receives the number of message bytes that did not fit
 *
 * Internal function used by iucv_message_receive and __iucv_message_receive
 * to receive RMDATA data stored in struct iucv_message.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer,
					 size_t size, size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			copy = min_t(size_t, size, array->length);
			memcpy((u8 *)(addr_t) array->address,
			       rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}

/**
 * __iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: receives the number of message bytes that did not fit
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: receives the number of message bytes that did not fit
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);
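
/*
 * Illustrative sketch (compiled out): receiving a message into a
 * two-element scatter list with IUCV_IPBUFLST. The 16-byte header split
 * is an arbitrary choice for the example and assumes msg->length >= 16.
 */
#if 0
static int example_receive(struct iucv_path *path, struct iucv_message *msg,
			   void *hdr, void *body)
{
	struct iucv_array array[2];
	size_t residual;

	array[0].address = (u32)(addr_t) hdr;
	array[0].length = 16;
	array[1].address = (u32)(addr_t) body;
	array[1].length = msg->length - 16;
	return iucv_message_receive(path, msg, IUCV_IPBUFLST, array,
				    msg->length, &residual);
}
#endif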

/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must identify completely the message to which you wish to reply, i.e.,
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);

/**
 * __iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);
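
/*
 * Illustrative sketch (compiled out): a one-way send. For the RMDATA
 * fast path CP copies a full eight bytes out of the parameter list, so
 * the source buffer must provide 8 bytes; the flags gate below is an
 * assumption about how a caller might track the negotiated path flags.
 */
#if 0
static int example_send(struct iucv_path *path, void *data, size_t len)
{
	struct iucv_message msg = {
		.class	= 0,		/* target class, partner-defined */
		.tag	= 0x4945,	/* arbitrary tag returned on completion */
	};

	if (len == 8 && (path->flags & IUCV_IPRMDATA))
		return iucv_message_send(path, &msg, IUCV_IPRMDATA, 0,
					 data, len);
	return iucv_message_send(path, &msg, 0, 0, data, len);
}
#endif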

/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @answer: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 * @residual: currently unused
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. The receiver of the send is expected to
 * reply to the message and a buffer is provided into which IUCV moves
 * the reply to this message.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = (u32)(addr_t) answer;
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);
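
/*
 * Illustrative sketch (compiled out): a two-way exchange. CP moves the
 * partner's reply into ans; the message complete interrupt signals that
 * the answer buffer is valid.
 */
#if 0
static int example_request(struct iucv_path *path, void *req, size_t req_len,
			   void *ans, size_t ans_len)
{
	struct iucv_message msg = { .tag = 1 };	/* arbitrary tag */

	return iucv_message_send2way(path, &msg, 0, 0, req, req_len,
				     ans, ans_len, NULL);
}
#endif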

/**
 * iucv_path_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process connection pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handler until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	iucv_sever_pathid(ipp->ippathid, error);
}

/**
 * iucv_path_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process connection complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  res2[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	if (path)
		path->flags = ipc->ipflags1;
	if (path && path->handler && path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}

/**
 * iucv_path_severed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection severed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_severed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	if (!path || !path->handler)	/* Already severed */
		return;
	if (path->handler->path_severed)
		path->handler->path_severed(path, ips->ipuser);
	else {
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del(&path->list);
		iucv_path_free(path);
	}
}

/**
 * iucv_path_quiesced
 * @data: Pointer to external interrupt buffer
 *
 * Process connection quiesced work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_quiesced {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	if (path && path->handler && path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}

/**
 * iucv_path_resumed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection resumed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_resumed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	if (path && path->handler && path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}

/**
 * iucv_message_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process message complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_message_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 ipaudit;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;

static void iucv_message_complete(struct iucv_irq_data *data)
{
	struct iucv_message_complete *imc = (void *) data;
	struct iucv_path *path = iucv_path_table[imc->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_complete) {
		msg.flags = imc->ipflags1;
		msg.id = imc->ipmsgid;
		msg.audit = imc->ipaudit;
		memcpy(msg.rmmsg, imc->iprmmsg, 8);
		msg.class = imc->ipsrccls;
		msg.tag = imc->ipmsgtag;
		msg.length = imc->ipbfln2f;
		path->handler->message_complete(path, &msg);
	}
}

/**
 * iucv_message_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process message pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_message_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	union {
		u32 iprmmsg1_u32;
		u8  iprmmsg1[4];
	} ln1msg1;
	union {
		u32 ipbfln1f;
		u8  iprmmsg2[4];
	} ln1msg2;
	u32 res1[3];
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;

static void iucv_message_pending(struct iucv_irq_data *data)
{
	struct iucv_message_pending *imp = (void *) data;
	struct iucv_path *path = iucv_path_table[imp->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_pending) {
		msg.flags = imp->ipflags1;
		msg.id = imp->ipmsgid;
		msg.class = imp->iptrgcls;
		if (imp->ipflags1 & IUCV_IPRMDATA) {
			memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
			msg.length = 8;
		} else
			msg.length = imp->ln1msg2.ipbfln1f;
		msg.reply_size = imp->ipbfln2f;
		path->handler->message_pending(path, &msg);
	}
}
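
/*
 * Summary of the interrupt type codes dispatched below: 0x01 connection
 * pending (handled separately via the work queue, see
 * iucv_external_interrupt), 0x02 connection complete, 0x03 connection
 * severed, 0x04 connection quiesced, 0x05 connection resumed,
 * 0x06/0x07 message complete, 0x08/0x09 message pending.
 */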

/**
 * iucv_tasklet_fn:
 *
 * This tasklet loops over the queue of irq buffers created by
 * iucv_external_interrupt, calls the appropriate action handler
 * and then frees the buffer.
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/**
 * iucv_work_fn:
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/**
 * iucv_external_interrupt
 * @ext_code: irq code
 * @param32: external interrupt parameter (unused)
 * @param64: external interrupt parameter (unused)
 *
 * Handles external interrupts coming in from CP.
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
static void iucv_external_interrupt(struct ext_code ext_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	inc_irq_stat(IRQEXT_IUC);
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warn("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

static int iucv_pm_prepare(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_prepare\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
		rc = dev->driver->pm->prepare(dev);
	return rc;
}

static void iucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_complete\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
		dev->driver->pm->complete(dev);
}

/**
 * iucv_path_table_empty() - determine if iucv path table is empty
 *
 * Returns 0 if there are still iucv paths defined
 *	   1 if there are no iucv paths defined
 */
static int iucv_path_table_empty(void)
{
	int i;

	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			return 0;
	}
	return 1;
}

/**
 * iucv_pm_freeze() - Freeze PM callback
 * @dev: iucv-based device
 *
 * disable iucv interrupts
 * invoke callback function of the iucv-based driver
 * shut down iucv, if no iucv paths are established anymore
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	struct iucv_irq_list *p, *n;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	if (iucv_pm_state != IUCV_PM_FREEZING) {
		for_each_cpu(cpu, &iucv_irq_cpumask)
			smp_call_function_single(cpu, iucv_block_cpu_almost,
						 NULL, 1);
		cancel_work_sync(&iucv_work);
		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
			list_del_init(&p->list);
			iucv_sever_pathid(p->data.ippathid,
					  iucv_error_no_listener);
			kfree(p);
		}
	}
	iucv_pm_state = IUCV_PM_FREEZING;
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}

/**
 * iucv_pm_thaw() - Thaw PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt
 * buffers and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_thaw(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
	iucv_pm_state = IUCV_PM_THAWING;
	if (!iucv_path_table) {
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (cpumask_empty(&iucv_irq_cpumask)) {
		if (iucv_nonsmp_handler)
			/* enable interrupts on one cpu */
			iucv_allow_cpu(NULL);
		else
			/* enable interrupts on all cpus */
			iucv_setmask_mp();
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
		rc = dev->driver->pm->thaw(dev);
out:
	return rc;
}

/**
 * iucv_pm_restore() - Restore PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt
 * buffers and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_restore(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_restore %p\n",
	       iucv_path_table);
#endif
	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
		pr_warn("Suspending Linux did not completely close all IUCV connections\n");
	iucv_pm_state = IUCV_PM_RESTORING;
	if (cpumask_empty(&iucv_irq_cpumask)) {
		rc = iucv_query_maxconn();
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
		rc = dev->driver->pm->restore(dev);
out:
	return rc;
}

struct iucv_interface iucv_if = {
	.message_receive = iucv_message_receive,
	.__message_receive = __iucv_message_receive,
	.message_reply = iucv_message_reply,
	.message_reject = iucv_message_reject,
	.message_send = iucv_message_send,
	.__message_send = __iucv_message_send,
	.message_send2way = iucv_message_send2way,
	.message_purge = iucv_message_purge,
	.path_accept = iucv_path_accept,
	.path_connect = iucv_path_connect,
	.path_quiesce = iucv_path_quiesce,
	.path_resume = iucv_path_resume,
	.path_sever = iucv_path_sever,
	.iucv_register = iucv_register,
	.iucv_unregister = iucv_unregister,
	.bus = NULL,
	.root = NULL,
};
EXPORT_SYMBOL(iucv_if);

static enum cpuhp_state iucv_online;

/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 */
static int __init iucv_init(void)
{
	int rc;

	if (!MACHINE_IS_VM) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	ctl_set_bit(0, 1);
	rc = iucv_query_maxconn();
	if (rc)
		goto out_ctl;
	rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
	if (rc)
		goto out_ctl;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
			       iucv_cpu_prepare, iucv_cpu_dead);
	if (rc)
		goto out_dev;
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
			       iucv_cpu_online, iucv_cpu_down_prep);
	if (rc < 0)
		goto out_prep;
	iucv_online = rc;

	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_remove_hp;
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	iucv_if.root = iucv_root;
	iucv_if.bus = &iucv_bus;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_remove_hp:
	cpuhp_remove_state(iucv_online);
out_prep:
	cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
out_dev:
	root_device_unregister(iucv_root);
out_int:
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
	ctl_clear_bit(0, 1);
out:
	return rc;
}

/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;

	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);

	cpuhp_remove_state_nocalls(iucv_online);
	cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");