1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * IUCV base infrastructure. 4 * 5 * Copyright IBM Corp. 2001, 2009 6 * 7 * Author(s): 8 * Original source: 9 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 10 * Xenia Tkatschow (xenia@us.ibm.com) 11 * 2Gb awareness and general cleanup: 12 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 13 * Rewritten for af_iucv: 14 * Martin Schwidefsky <schwidefsky@de.ibm.com> 15 * PM functions: 16 * Ursula Braun (ursula.braun@de.ibm.com) 17 * 18 * Documentation used: 19 * The original source 20 * CP Programming Service, IBM document # SC24-5760 21 */ 22 23 #define KMSG_COMPONENT "iucv" 24 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25 26 #include <linux/kernel_stat.h> 27 #include <linux/module.h> 28 #include <linux/moduleparam.h> 29 #include <linux/spinlock.h> 30 #include <linux/kernel.h> 31 #include <linux/slab.h> 32 #include <linux/init.h> 33 #include <linux/interrupt.h> 34 #include <linux/list.h> 35 #include <linux/errno.h> 36 #include <linux/err.h> 37 #include <linux/device.h> 38 #include <linux/cpu.h> 39 #include <linux/reboot.h> 40 #include <net/iucv/iucv.h> 41 #include <linux/atomic.h> 42 #include <asm/ebcdic.h> 43 #include <asm/io.h> 44 #include <asm/irq.h> 45 #include <asm/smp.h> 46 47 /* 48 * FLAGS: 49 * All flags are defined in the field IPFLAGS1 of each function 50 * and can be found in CP Programming Services. 51 * IPSRCCLS - Indicates you have specified a source class. 52 * IPTRGCLS - Indicates you have specified a target class. 53 * IPFGPID - Indicates you have specified a pathid. 54 * IPFGMID - Indicates you have specified a message ID. 55 * IPNORPY - Indicates a one-way message. No reply expected. 56 * IPALL - Indicates that all paths are affected. 57 */ 58 #define IUCV_IPSRCCLS 0x01 59 #define IUCV_IPTRGCLS 0x01 60 #define IUCV_IPFGPID 0x02 61 #define IUCV_IPFGMID 0x04 62 #define IUCV_IPNORPY 0x10 63 #define IUCV_IPALL 0x80 64 65 static int iucv_bus_match(struct device *dev, struct device_driver *drv) 66 { 67 return 0; 68 } 69 70 enum iucv_pm_states { 71 IUCV_PM_INITIAL = 0, 72 IUCV_PM_FREEZING = 1, 73 IUCV_PM_THAWING = 2, 74 IUCV_PM_RESTORING = 3, 75 }; 76 static enum iucv_pm_states iucv_pm_state; 77 78 static int iucv_pm_prepare(struct device *); 79 static void iucv_pm_complete(struct device *); 80 static int iucv_pm_freeze(struct device *); 81 static int iucv_pm_thaw(struct device *); 82 static int iucv_pm_restore(struct device *); 83 84 static const struct dev_pm_ops iucv_pm_ops = { 85 .prepare = iucv_pm_prepare, 86 .complete = iucv_pm_complete, 87 .freeze = iucv_pm_freeze, 88 .thaw = iucv_pm_thaw, 89 .restore = iucv_pm_restore, 90 }; 91 92 struct bus_type iucv_bus = { 93 .name = "iucv", 94 .match = iucv_bus_match, 95 .pm = &iucv_pm_ops, 96 }; 97 EXPORT_SYMBOL(iucv_bus); 98 99 struct device *iucv_root; 100 EXPORT_SYMBOL(iucv_root); 101 102 static int iucv_available; 103 104 /* General IUCV interrupt structure */ 105 struct iucv_irq_data { 106 u16 ippathid; 107 u8 ipflags1; 108 u8 iptype; 109 u32 res2[8]; 110 }; 111 112 struct iucv_irq_list { 113 struct list_head list; 114 struct iucv_irq_data data; 115 }; 116 117 static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; 118 static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; 119 static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; 120 121 /* 122 * Queue of interrupt buffers lock for delivery via the tasklet 123 * (fast but can't call smp_call_function). 124 */ 125 static LIST_HEAD(iucv_task_queue); 126 127 /* 128 * The tasklet for fast delivery of iucv interrupts. 129 */ 130 static void iucv_tasklet_fn(unsigned long); 131 static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); 132 133 /* 134 * Queue of interrupt buffers for delivery via a work queue 135 * (slower but can call smp_call_function). 136 */ 137 static LIST_HEAD(iucv_work_queue); 138 139 /* 140 * The work element to deliver path pending interrupts. 141 */ 142 static void iucv_work_fn(struct work_struct *work); 143 static DECLARE_WORK(iucv_work, iucv_work_fn); 144 145 /* 146 * Spinlock protecting task and work queue. 147 */ 148 static DEFINE_SPINLOCK(iucv_queue_lock); 149 150 enum iucv_command_codes { 151 IUCV_QUERY = 0, 152 IUCV_RETRIEVE_BUFFER = 2, 153 IUCV_SEND = 4, 154 IUCV_RECEIVE = 5, 155 IUCV_REPLY = 6, 156 IUCV_REJECT = 8, 157 IUCV_PURGE = 9, 158 IUCV_ACCEPT = 10, 159 IUCV_CONNECT = 11, 160 IUCV_DECLARE_BUFFER = 12, 161 IUCV_QUIESCE = 13, 162 IUCV_RESUME = 14, 163 IUCV_SEVER = 15, 164 IUCV_SETMASK = 16, 165 IUCV_SETCONTROLMASK = 17, 166 }; 167 168 /* 169 * Error messages that are used with the iucv_sever function. They get 170 * converted to EBCDIC. 171 */ 172 static char iucv_error_no_listener[16] = "NO LISTENER"; 173 static char iucv_error_no_memory[16] = "NO MEMORY"; 174 static char iucv_error_pathid[16] = "INVALID PATHID"; 175 176 /* 177 * iucv_handler_list: List of registered handlers. 178 */ 179 static LIST_HEAD(iucv_handler_list); 180 181 /* 182 * iucv_path_table: an array of iucv_path structures. 183 */ 184 static struct iucv_path **iucv_path_table; 185 static unsigned long iucv_max_pathid; 186 187 /* 188 * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table 189 */ 190 static DEFINE_SPINLOCK(iucv_table_lock); 191 192 /* 193 * iucv_active_cpu: contains the number of the cpu executing the tasklet 194 * or the work handler. Needed for iucv_path_sever called from tasklet. 195 */ 196 static int iucv_active_cpu = -1; 197 198 /* 199 * Mutex and wait queue for iucv_register/iucv_unregister. 200 */ 201 static DEFINE_MUTEX(iucv_register_mutex); 202 203 /* 204 * Counter for number of non-smp capable handlers. 205 */ 206 static int iucv_nonsmp_handler; 207 208 /* 209 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, 210 * iucv_path_quiesce and iucv_path_sever. 211 */ 212 struct iucv_cmd_control { 213 u16 ippathid; 214 u8 ipflags1; 215 u8 iprcode; 216 u16 ipmsglim; 217 u16 res1; 218 u8 ipvmid[8]; 219 u8 ipuser[16]; 220 u8 iptarget[8]; 221 } __attribute__ ((packed,aligned(8))); 222 223 /* 224 * Data in parameter list iucv structure. Used by iucv_message_send, 225 * iucv_message_send2way and iucv_message_reply. 226 */ 227 struct iucv_cmd_dpl { 228 u16 ippathid; 229 u8 ipflags1; 230 u8 iprcode; 231 u32 ipmsgid; 232 u32 iptrgcls; 233 u8 iprmmsg[8]; 234 u32 ipsrccls; 235 u32 ipmsgtag; 236 u32 ipbfadr2; 237 u32 ipbfln2f; 238 u32 res; 239 } __attribute__ ((packed,aligned(8))); 240 241 /* 242 * Data in buffer iucv structure. Used by iucv_message_receive, 243 * iucv_message_reject, iucv_message_send, iucv_message_send2way 244 * and iucv_declare_cpu. 245 */ 246 struct iucv_cmd_db { 247 u16 ippathid; 248 u8 ipflags1; 249 u8 iprcode; 250 u32 ipmsgid; 251 u32 iptrgcls; 252 u32 ipbfadr1; 253 u32 ipbfln1f; 254 u32 ipsrccls; 255 u32 ipmsgtag; 256 u32 ipbfadr2; 257 u32 ipbfln2f; 258 u32 res; 259 } __attribute__ ((packed,aligned(8))); 260 261 /* 262 * Purge message iucv structure. Used by iucv_message_purge. 263 */ 264 struct iucv_cmd_purge { 265 u16 ippathid; 266 u8 ipflags1; 267 u8 iprcode; 268 u32 ipmsgid; 269 u8 ipaudit[3]; 270 u8 res1[5]; 271 u32 res2; 272 u32 ipsrccls; 273 u32 ipmsgtag; 274 u32 res3[3]; 275 } __attribute__ ((packed,aligned(8))); 276 277 /* 278 * Set mask iucv structure. Used by iucv_enable_cpu. 279 */ 280 struct iucv_cmd_set_mask { 281 u8 ipmask; 282 u8 res1[2]; 283 u8 iprcode; 284 u32 res2[9]; 285 } __attribute__ ((packed,aligned(8))); 286 287 union iucv_param { 288 struct iucv_cmd_control ctrl; 289 struct iucv_cmd_dpl dpl; 290 struct iucv_cmd_db db; 291 struct iucv_cmd_purge purge; 292 struct iucv_cmd_set_mask set_mask; 293 }; 294 295 /* 296 * Anchor for per-cpu IUCV command parameter block. 297 */ 298 static union iucv_param *iucv_param[NR_CPUS]; 299 static union iucv_param *iucv_param_irq[NR_CPUS]; 300 301 /** 302 * iucv_call_b2f0 303 * @code: identifier of IUCV call to CP. 304 * @parm: pointer to a struct iucv_parm block 305 * 306 * Calls CP to execute IUCV commands. 307 * 308 * Returns the result of the CP IUCV call. 309 */ 310 static inline int __iucv_call_b2f0(int command, union iucv_param *parm) 311 { 312 register unsigned long reg0 asm ("0"); 313 register unsigned long reg1 asm ("1"); 314 int ccode; 315 316 reg0 = command; 317 reg1 = (unsigned long)parm; 318 asm volatile( 319 " .long 0xb2f01000\n" 320 " ipm %0\n" 321 " srl %0,28\n" 322 : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) 323 : "m" (*parm) : "cc"); 324 return ccode; 325 } 326 327 static inline int iucv_call_b2f0(int command, union iucv_param *parm) 328 { 329 int ccode; 330 331 ccode = __iucv_call_b2f0(command, parm); 332 return ccode == 1 ? parm->ctrl.iprcode : ccode; 333 } 334 335 /** 336 * iucv_query_maxconn 337 * 338 * Determines the maximum number of connections that may be established. 339 * 340 * Returns the maximum number of connections or -EPERM is IUCV is not 341 * available. 342 */ 343 static int __iucv_query_maxconn(void *param, unsigned long *max_pathid) 344 { 345 register unsigned long reg0 asm ("0"); 346 register unsigned long reg1 asm ("1"); 347 int ccode; 348 349 reg0 = IUCV_QUERY; 350 reg1 = (unsigned long) param; 351 asm volatile ( 352 " .long 0xb2f01000\n" 353 " ipm %0\n" 354 " srl %0,28\n" 355 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); 356 *max_pathid = reg1; 357 return ccode; 358 } 359 360 static int iucv_query_maxconn(void) 361 { 362 unsigned long max_pathid; 363 void *param; 364 int ccode; 365 366 param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA); 367 if (!param) 368 return -ENOMEM; 369 ccode = __iucv_query_maxconn(param, &max_pathid); 370 if (ccode == 0) 371 iucv_max_pathid = max_pathid; 372 kfree(param); 373 return ccode ? -EPERM : 0; 374 } 375 376 /** 377 * iucv_allow_cpu 378 * @data: unused 379 * 380 * Allow iucv interrupts on this cpu. 381 */ 382 static void iucv_allow_cpu(void *data) 383 { 384 int cpu = smp_processor_id(); 385 union iucv_param *parm; 386 387 /* 388 * Enable all iucv interrupts. 389 * ipmask contains bits for the different interrupts 390 * 0x80 - Flag to allow nonpriority message pending interrupts 391 * 0x40 - Flag to allow priority message pending interrupts 392 * 0x20 - Flag to allow nonpriority message completion interrupts 393 * 0x10 - Flag to allow priority message completion interrupts 394 * 0x08 - Flag to allow IUCV control interrupts 395 */ 396 parm = iucv_param_irq[cpu]; 397 memset(parm, 0, sizeof(union iucv_param)); 398 parm->set_mask.ipmask = 0xf8; 399 iucv_call_b2f0(IUCV_SETMASK, parm); 400 401 /* 402 * Enable all iucv control interrupts. 403 * ipmask contains bits for the different interrupts 404 * 0x80 - Flag to allow pending connections interrupts 405 * 0x40 - Flag to allow connection complete interrupts 406 * 0x20 - Flag to allow connection severed interrupts 407 * 0x10 - Flag to allow connection quiesced interrupts 408 * 0x08 - Flag to allow connection resumed interrupts 409 */ 410 memset(parm, 0, sizeof(union iucv_param)); 411 parm->set_mask.ipmask = 0xf8; 412 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); 413 /* Set indication that iucv interrupts are allowed for this cpu. */ 414 cpumask_set_cpu(cpu, &iucv_irq_cpumask); 415 } 416 417 /** 418 * iucv_block_cpu 419 * @data: unused 420 * 421 * Block iucv interrupts on this cpu. 422 */ 423 static void iucv_block_cpu(void *data) 424 { 425 int cpu = smp_processor_id(); 426 union iucv_param *parm; 427 428 /* Disable all iucv interrupts. */ 429 parm = iucv_param_irq[cpu]; 430 memset(parm, 0, sizeof(union iucv_param)); 431 iucv_call_b2f0(IUCV_SETMASK, parm); 432 433 /* Clear indication that iucv interrupts are allowed for this cpu. */ 434 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); 435 } 436 437 /** 438 * iucv_block_cpu_almost 439 * @data: unused 440 * 441 * Allow connection-severed interrupts only on this cpu. 442 */ 443 static void iucv_block_cpu_almost(void *data) 444 { 445 int cpu = smp_processor_id(); 446 union iucv_param *parm; 447 448 /* Allow iucv control interrupts only */ 449 parm = iucv_param_irq[cpu]; 450 memset(parm, 0, sizeof(union iucv_param)); 451 parm->set_mask.ipmask = 0x08; 452 iucv_call_b2f0(IUCV_SETMASK, parm); 453 /* Allow iucv-severed interrupt only */ 454 memset(parm, 0, sizeof(union iucv_param)); 455 parm->set_mask.ipmask = 0x20; 456 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); 457 458 /* Clear indication that iucv interrupts are allowed for this cpu. */ 459 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); 460 } 461 462 /** 463 * iucv_declare_cpu 464 * @data: unused 465 * 466 * Declare a interrupt buffer on this cpu. 467 */ 468 static void iucv_declare_cpu(void *data) 469 { 470 int cpu = smp_processor_id(); 471 union iucv_param *parm; 472 int rc; 473 474 if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) 475 return; 476 477 /* Declare interrupt buffer. */ 478 parm = iucv_param_irq[cpu]; 479 memset(parm, 0, sizeof(union iucv_param)); 480 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 481 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 482 if (rc) { 483 char *err = "Unknown"; 484 switch (rc) { 485 case 0x03: 486 err = "Directory error"; 487 break; 488 case 0x0a: 489 err = "Invalid length"; 490 break; 491 case 0x13: 492 err = "Buffer already exists"; 493 break; 494 case 0x3e: 495 err = "Buffer overlap"; 496 break; 497 case 0x5c: 498 err = "Paging or storage error"; 499 break; 500 } 501 pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n", 502 cpu, rc, err); 503 return; 504 } 505 506 /* Set indication that an iucv buffer exists for this cpu. */ 507 cpumask_set_cpu(cpu, &iucv_buffer_cpumask); 508 509 if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) 510 /* Enable iucv interrupts on this cpu. */ 511 iucv_allow_cpu(NULL); 512 else 513 /* Disable iucv interrupts on this cpu. */ 514 iucv_block_cpu(NULL); 515 } 516 517 /** 518 * iucv_retrieve_cpu 519 * @data: unused 520 * 521 * Retrieve interrupt buffer on this cpu. 522 */ 523 static void iucv_retrieve_cpu(void *data) 524 { 525 int cpu = smp_processor_id(); 526 union iucv_param *parm; 527 528 if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) 529 return; 530 531 /* Block iucv interrupts. */ 532 iucv_block_cpu(NULL); 533 534 /* Retrieve interrupt buffer. */ 535 parm = iucv_param_irq[cpu]; 536 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 537 538 /* Clear indication that an iucv buffer exists for this cpu. */ 539 cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); 540 } 541 542 /** 543 * iucv_setmask_smp 544 * 545 * Allow iucv interrupts on all cpus. 546 */ 547 static void iucv_setmask_mp(void) 548 { 549 int cpu; 550 551 get_online_cpus(); 552 for_each_online_cpu(cpu) 553 /* Enable all cpus with a declared buffer. */ 554 if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && 555 !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) 556 smp_call_function_single(cpu, iucv_allow_cpu, 557 NULL, 1); 558 put_online_cpus(); 559 } 560 561 /** 562 * iucv_setmask_up 563 * 564 * Allow iucv interrupts on a single cpu. 565 */ 566 static void iucv_setmask_up(void) 567 { 568 cpumask_t cpumask; 569 int cpu; 570 571 /* Disable all cpu but the first in cpu_irq_cpumask. */ 572 cpumask_copy(&cpumask, &iucv_irq_cpumask); 573 cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); 574 for_each_cpu(cpu, &cpumask) 575 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); 576 } 577 578 /** 579 * iucv_enable 580 * 581 * This function makes iucv ready for use. It allocates the pathid 582 * table, declares an iucv interrupt buffer and enables the iucv 583 * interrupts. Called when the first user has registered an iucv 584 * handler. 585 */ 586 static int iucv_enable(void) 587 { 588 size_t alloc_size; 589 int cpu, rc; 590 591 get_online_cpus(); 592 rc = -ENOMEM; 593 alloc_size = iucv_max_pathid * sizeof(struct iucv_path); 594 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); 595 if (!iucv_path_table) 596 goto out; 597 /* Declare per cpu buffers. */ 598 rc = -EIO; 599 for_each_online_cpu(cpu) 600 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 601 if (cpumask_empty(&iucv_buffer_cpumask)) 602 /* No cpu could declare an iucv buffer. */ 603 goto out; 604 put_online_cpus(); 605 return 0; 606 out: 607 kfree(iucv_path_table); 608 iucv_path_table = NULL; 609 put_online_cpus(); 610 return rc; 611 } 612 613 /** 614 * iucv_disable 615 * 616 * This function shuts down iucv. It disables iucv interrupts, retrieves 617 * the iucv interrupt buffer and frees the pathid table. Called after the 618 * last user unregister its iucv handler. 619 */ 620 static void iucv_disable(void) 621 { 622 get_online_cpus(); 623 on_each_cpu(iucv_retrieve_cpu, NULL, 1); 624 kfree(iucv_path_table); 625 iucv_path_table = NULL; 626 put_online_cpus(); 627 } 628 629 static int iucv_cpu_dead(unsigned int cpu) 630 { 631 kfree(iucv_param_irq[cpu]); 632 iucv_param_irq[cpu] = NULL; 633 kfree(iucv_param[cpu]); 634 iucv_param[cpu] = NULL; 635 kfree(iucv_irq_data[cpu]); 636 iucv_irq_data[cpu] = NULL; 637 return 0; 638 } 639 640 static int iucv_cpu_prepare(unsigned int cpu) 641 { 642 /* Note: GFP_DMA used to get memory below 2G */ 643 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), 644 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 645 if (!iucv_irq_data[cpu]) 646 goto out_free; 647 648 /* Allocate parameter blocks. */ 649 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 650 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 651 if (!iucv_param[cpu]) 652 goto out_free; 653 654 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), 655 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 656 if (!iucv_param_irq[cpu]) 657 goto out_free; 658 659 return 0; 660 661 out_free: 662 iucv_cpu_dead(cpu); 663 return -ENOMEM; 664 } 665 666 static int iucv_cpu_online(unsigned int cpu) 667 { 668 if (!iucv_path_table) 669 return 0; 670 iucv_declare_cpu(NULL); 671 return 0; 672 } 673 674 static int iucv_cpu_down_prep(unsigned int cpu) 675 { 676 cpumask_t cpumask; 677 678 if (!iucv_path_table) 679 return 0; 680 681 cpumask_copy(&cpumask, &iucv_buffer_cpumask); 682 cpumask_clear_cpu(cpu, &cpumask); 683 if (cpumask_empty(&cpumask)) 684 /* Can't offline last IUCV enabled cpu. */ 685 return -EINVAL; 686 687 iucv_retrieve_cpu(NULL); 688 if (!cpumask_empty(&iucv_irq_cpumask)) 689 return 0; 690 smp_call_function_single(cpumask_first(&iucv_buffer_cpumask), 691 iucv_allow_cpu, NULL, 1); 692 return 0; 693 } 694 695 /** 696 * iucv_sever_pathid 697 * @pathid: path identification number. 698 * @userdata: 16-bytes of user data. 699 * 700 * Sever an iucv path to free up the pathid. Used internally. 701 */ 702 static int iucv_sever_pathid(u16 pathid, u8 *userdata) 703 { 704 union iucv_param *parm; 705 706 parm = iucv_param_irq[smp_processor_id()]; 707 memset(parm, 0, sizeof(union iucv_param)); 708 if (userdata) 709 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 710 parm->ctrl.ippathid = pathid; 711 return iucv_call_b2f0(IUCV_SEVER, parm); 712 } 713 714 /** 715 * __iucv_cleanup_queue 716 * @dummy: unused dummy argument 717 * 718 * Nop function called via smp_call_function to force work items from 719 * pending external iucv interrupts to the work queue. 720 */ 721 static void __iucv_cleanup_queue(void *dummy) 722 { 723 } 724 725 /** 726 * iucv_cleanup_queue 727 * 728 * Function called after a path has been severed to find all remaining 729 * work items for the now stale pathid. The caller needs to hold the 730 * iucv_table_lock. 731 */ 732 static void iucv_cleanup_queue(void) 733 { 734 struct iucv_irq_list *p, *n; 735 736 /* 737 * When a path is severed, the pathid can be reused immediately 738 * on a iucv connect or a connection pending interrupt. Remove 739 * all entries from the task queue that refer to a stale pathid 740 * (iucv_path_table[ix] == NULL). Only then do the iucv connect 741 * or deliver the connection pending interrupt. To get all the 742 * pending interrupts force them to the work queue by calling 743 * an empty function on all cpus. 744 */ 745 smp_call_function(__iucv_cleanup_queue, NULL, 1); 746 spin_lock_irq(&iucv_queue_lock); 747 list_for_each_entry_safe(p, n, &iucv_task_queue, list) { 748 /* Remove stale work items from the task queue. */ 749 if (iucv_path_table[p->data.ippathid] == NULL) { 750 list_del(&p->list); 751 kfree(p); 752 } 753 } 754 spin_unlock_irq(&iucv_queue_lock); 755 } 756 757 /** 758 * iucv_register: 759 * @handler: address of iucv handler structure 760 * @smp: != 0 indicates that the handler can deal with out of order messages 761 * 762 * Registers a driver with IUCV. 763 * 764 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid 765 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. 766 */ 767 int iucv_register(struct iucv_handler *handler, int smp) 768 { 769 int rc; 770 771 if (!iucv_available) 772 return -ENOSYS; 773 mutex_lock(&iucv_register_mutex); 774 if (!smp) 775 iucv_nonsmp_handler++; 776 if (list_empty(&iucv_handler_list)) { 777 rc = iucv_enable(); 778 if (rc) 779 goto out_mutex; 780 } else if (!smp && iucv_nonsmp_handler == 1) 781 iucv_setmask_up(); 782 INIT_LIST_HEAD(&handler->paths); 783 784 spin_lock_bh(&iucv_table_lock); 785 list_add_tail(&handler->list, &iucv_handler_list); 786 spin_unlock_bh(&iucv_table_lock); 787 rc = 0; 788 out_mutex: 789 mutex_unlock(&iucv_register_mutex); 790 return rc; 791 } 792 EXPORT_SYMBOL(iucv_register); 793 794 /** 795 * iucv_unregister 796 * @handler: address of iucv handler structure 797 * @smp: != 0 indicates that the handler can deal with out of order messages 798 * 799 * Unregister driver from IUCV. 800 */ 801 void iucv_unregister(struct iucv_handler *handler, int smp) 802 { 803 struct iucv_path *p, *n; 804 805 mutex_lock(&iucv_register_mutex); 806 spin_lock_bh(&iucv_table_lock); 807 /* Remove handler from the iucv_handler_list. */ 808 list_del_init(&handler->list); 809 /* Sever all pathids still referring to the handler. */ 810 list_for_each_entry_safe(p, n, &handler->paths, list) { 811 iucv_sever_pathid(p->pathid, NULL); 812 iucv_path_table[p->pathid] = NULL; 813 list_del(&p->list); 814 iucv_path_free(p); 815 } 816 spin_unlock_bh(&iucv_table_lock); 817 if (!smp) 818 iucv_nonsmp_handler--; 819 if (list_empty(&iucv_handler_list)) 820 iucv_disable(); 821 else if (!smp && iucv_nonsmp_handler == 0) 822 iucv_setmask_mp(); 823 mutex_unlock(&iucv_register_mutex); 824 } 825 EXPORT_SYMBOL(iucv_unregister); 826 827 static int iucv_reboot_event(struct notifier_block *this, 828 unsigned long event, void *ptr) 829 { 830 int i; 831 832 if (cpumask_empty(&iucv_irq_cpumask)) 833 return NOTIFY_DONE; 834 835 get_online_cpus(); 836 on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1); 837 preempt_disable(); 838 for (i = 0; i < iucv_max_pathid; i++) { 839 if (iucv_path_table[i]) 840 iucv_sever_pathid(i, NULL); 841 } 842 preempt_enable(); 843 put_online_cpus(); 844 iucv_disable(); 845 return NOTIFY_DONE; 846 } 847 848 static struct notifier_block iucv_reboot_notifier = { 849 .notifier_call = iucv_reboot_event, 850 }; 851 852 /** 853 * iucv_path_accept 854 * @path: address of iucv path structure 855 * @handler: address of iucv handler structure 856 * @userdata: 16 bytes of data reflected to the communication partner 857 * @private: private data passed to interrupt handlers for this path 858 * 859 * This function is issued after the user received a connection pending 860 * external interrupt and now wishes to complete the IUCV communication path. 861 * 862 * Returns the result of the CP IUCV call. 863 */ 864 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, 865 u8 *userdata, void *private) 866 { 867 union iucv_param *parm; 868 int rc; 869 870 local_bh_disable(); 871 if (cpumask_empty(&iucv_buffer_cpumask)) { 872 rc = -EIO; 873 goto out; 874 } 875 /* Prepare parameter block. */ 876 parm = iucv_param[smp_processor_id()]; 877 memset(parm, 0, sizeof(union iucv_param)); 878 parm->ctrl.ippathid = path->pathid; 879 parm->ctrl.ipmsglim = path->msglim; 880 if (userdata) 881 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 882 parm->ctrl.ipflags1 = path->flags; 883 884 rc = iucv_call_b2f0(IUCV_ACCEPT, parm); 885 if (!rc) { 886 path->private = private; 887 path->msglim = parm->ctrl.ipmsglim; 888 path->flags = parm->ctrl.ipflags1; 889 } 890 out: 891 local_bh_enable(); 892 return rc; 893 } 894 EXPORT_SYMBOL(iucv_path_accept); 895 896 /** 897 * iucv_path_connect 898 * @path: address of iucv path structure 899 * @handler: address of iucv handler structure 900 * @userid: 8-byte user identification 901 * @system: 8-byte target system identification 902 * @userdata: 16 bytes of data reflected to the communication partner 903 * @private: private data passed to interrupt handlers for this path 904 * 905 * This function establishes an IUCV path. Although the connect may complete 906 * successfully, you are not able to use the path until you receive an IUCV 907 * Connection Complete external interrupt. 908 * 909 * Returns the result of the CP IUCV call. 910 */ 911 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, 912 u8 *userid, u8 *system, u8 *userdata, 913 void *private) 914 { 915 union iucv_param *parm; 916 int rc; 917 918 spin_lock_bh(&iucv_table_lock); 919 iucv_cleanup_queue(); 920 if (cpumask_empty(&iucv_buffer_cpumask)) { 921 rc = -EIO; 922 goto out; 923 } 924 parm = iucv_param[smp_processor_id()]; 925 memset(parm, 0, sizeof(union iucv_param)); 926 parm->ctrl.ipmsglim = path->msglim; 927 parm->ctrl.ipflags1 = path->flags; 928 if (userid) { 929 memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); 930 ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); 931 EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); 932 } 933 if (system) { 934 memcpy(parm->ctrl.iptarget, system, 935 sizeof(parm->ctrl.iptarget)); 936 ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); 937 EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); 938 } 939 if (userdata) 940 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 941 942 rc = iucv_call_b2f0(IUCV_CONNECT, parm); 943 if (!rc) { 944 if (parm->ctrl.ippathid < iucv_max_pathid) { 945 path->pathid = parm->ctrl.ippathid; 946 path->msglim = parm->ctrl.ipmsglim; 947 path->flags = parm->ctrl.ipflags1; 948 path->handler = handler; 949 path->private = private; 950 list_add_tail(&path->list, &handler->paths); 951 iucv_path_table[path->pathid] = path; 952 } else { 953 iucv_sever_pathid(parm->ctrl.ippathid, 954 iucv_error_pathid); 955 rc = -EIO; 956 } 957 } 958 out: 959 spin_unlock_bh(&iucv_table_lock); 960 return rc; 961 } 962 EXPORT_SYMBOL(iucv_path_connect); 963 964 /** 965 * iucv_path_quiesce: 966 * @path: address of iucv path structure 967 * @userdata: 16 bytes of data reflected to the communication partner 968 * 969 * This function temporarily suspends incoming messages on an IUCV path. 970 * You can later reactivate the path by invoking the iucv_resume function. 971 * 972 * Returns the result from the CP IUCV call. 973 */ 974 int iucv_path_quiesce(struct iucv_path *path, u8 *userdata) 975 { 976 union iucv_param *parm; 977 int rc; 978 979 local_bh_disable(); 980 if (cpumask_empty(&iucv_buffer_cpumask)) { 981 rc = -EIO; 982 goto out; 983 } 984 parm = iucv_param[smp_processor_id()]; 985 memset(parm, 0, sizeof(union iucv_param)); 986 if (userdata) 987 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 988 parm->ctrl.ippathid = path->pathid; 989 rc = iucv_call_b2f0(IUCV_QUIESCE, parm); 990 out: 991 local_bh_enable(); 992 return rc; 993 } 994 EXPORT_SYMBOL(iucv_path_quiesce); 995 996 /** 997 * iucv_path_resume: 998 * @path: address of iucv path structure 999 * @userdata: 16 bytes of data reflected to the communication partner 1000 * 1001 * This function resumes incoming messages on an IUCV path that has 1002 * been stopped with iucv_path_quiesce. 1003 * 1004 * Returns the result from the CP IUCV call. 1005 */ 1006 int iucv_path_resume(struct iucv_path *path, u8 *userdata) 1007 { 1008 union iucv_param *parm; 1009 int rc; 1010 1011 local_bh_disable(); 1012 if (cpumask_empty(&iucv_buffer_cpumask)) { 1013 rc = -EIO; 1014 goto out; 1015 } 1016 parm = iucv_param[smp_processor_id()]; 1017 memset(parm, 0, sizeof(union iucv_param)); 1018 if (userdata) 1019 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 1020 parm->ctrl.ippathid = path->pathid; 1021 rc = iucv_call_b2f0(IUCV_RESUME, parm); 1022 out: 1023 local_bh_enable(); 1024 return rc; 1025 } 1026 1027 /** 1028 * iucv_path_sever 1029 * @path: address of iucv path structure 1030 * @userdata: 16 bytes of data reflected to the communication partner 1031 * 1032 * This function terminates an IUCV path. 1033 * 1034 * Returns the result from the CP IUCV call. 1035 */ 1036 int iucv_path_sever(struct iucv_path *path, u8 *userdata) 1037 { 1038 int rc; 1039 1040 preempt_disable(); 1041 if (cpumask_empty(&iucv_buffer_cpumask)) { 1042 rc = -EIO; 1043 goto out; 1044 } 1045 if (iucv_active_cpu != smp_processor_id()) 1046 spin_lock_bh(&iucv_table_lock); 1047 rc = iucv_sever_pathid(path->pathid, userdata); 1048 iucv_path_table[path->pathid] = NULL; 1049 list_del_init(&path->list); 1050 if (iucv_active_cpu != smp_processor_id()) 1051 spin_unlock_bh(&iucv_table_lock); 1052 out: 1053 preempt_enable(); 1054 return rc; 1055 } 1056 EXPORT_SYMBOL(iucv_path_sever); 1057 1058 /** 1059 * iucv_message_purge 1060 * @path: address of iucv path structure 1061 * @msg: address of iucv msg structure 1062 * @srccls: source class of message 1063 * 1064 * Cancels a message you have sent. 1065 * 1066 * Returns the result from the CP IUCV call. 1067 */ 1068 int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, 1069 u32 srccls) 1070 { 1071 union iucv_param *parm; 1072 int rc; 1073 1074 local_bh_disable(); 1075 if (cpumask_empty(&iucv_buffer_cpumask)) { 1076 rc = -EIO; 1077 goto out; 1078 } 1079 parm = iucv_param[smp_processor_id()]; 1080 memset(parm, 0, sizeof(union iucv_param)); 1081 parm->purge.ippathid = path->pathid; 1082 parm->purge.ipmsgid = msg->id; 1083 parm->purge.ipsrccls = srccls; 1084 parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; 1085 rc = iucv_call_b2f0(IUCV_PURGE, parm); 1086 if (!rc) { 1087 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; 1088 msg->tag = parm->purge.ipmsgtag; 1089 } 1090 out: 1091 local_bh_enable(); 1092 return rc; 1093 } 1094 EXPORT_SYMBOL(iucv_message_purge); 1095 1096 /** 1097 * iucv_message_receive_iprmdata 1098 * @path: address of iucv path structure 1099 * @msg: address of iucv msg structure 1100 * @flags: how the message is received (IUCV_IPBUFLST) 1101 * @buffer: address of data buffer or address of struct iucv_array 1102 * @size: length of data buffer 1103 * @residual: 1104 * 1105 * Internal function used by iucv_message_receive and __iucv_message_receive 1106 * to receive RMDATA data stored in struct iucv_message. 1107 */ 1108 static int iucv_message_receive_iprmdata(struct iucv_path *path, 1109 struct iucv_message *msg, 1110 u8 flags, void *buffer, 1111 size_t size, size_t *residual) 1112 { 1113 struct iucv_array *array; 1114 u8 *rmmsg; 1115 size_t copy; 1116 1117 /* 1118 * Message is 8 bytes long and has been stored to the 1119 * message descriptor itself. 1120 */ 1121 if (residual) 1122 *residual = abs(size - 8); 1123 rmmsg = msg->rmmsg; 1124 if (flags & IUCV_IPBUFLST) { 1125 /* Copy to struct iucv_array. */ 1126 size = (size < 8) ? size : 8; 1127 for (array = buffer; size > 0; array++) { 1128 copy = min_t(size_t, size, array->length); 1129 memcpy((u8 *)(addr_t) array->address, 1130 rmmsg, copy); 1131 rmmsg += copy; 1132 size -= copy; 1133 } 1134 } else { 1135 /* Copy to direct buffer. */ 1136 memcpy(buffer, rmmsg, min_t(size_t, size, 8)); 1137 } 1138 return 0; 1139 } 1140 1141 /** 1142 * __iucv_message_receive 1143 * @path: address of iucv path structure 1144 * @msg: address of iucv msg structure 1145 * @flags: how the message is received (IUCV_IPBUFLST) 1146 * @buffer: address of data buffer or address of struct iucv_array 1147 * @size: length of data buffer 1148 * @residual: 1149 * 1150 * This function receives messages that are being sent to you over 1151 * established paths. This function will deal with RMDATA messages 1152 * embedded in struct iucv_message as well. 1153 * 1154 * Locking: no locking 1155 * 1156 * Returns the result from the CP IUCV call. 1157 */ 1158 int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, 1159 u8 flags, void *buffer, size_t size, size_t *residual) 1160 { 1161 union iucv_param *parm; 1162 int rc; 1163 1164 if (msg->flags & IUCV_IPRMDATA) 1165 return iucv_message_receive_iprmdata(path, msg, flags, 1166 buffer, size, residual); 1167 if (cpumask_empty(&iucv_buffer_cpumask)) { 1168 rc = -EIO; 1169 goto out; 1170 } 1171 parm = iucv_param[smp_processor_id()]; 1172 memset(parm, 0, sizeof(union iucv_param)); 1173 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1174 parm->db.ipbfln1f = (u32) size; 1175 parm->db.ipmsgid = msg->id; 1176 parm->db.ippathid = path->pathid; 1177 parm->db.iptrgcls = msg->class; 1178 parm->db.ipflags1 = (flags | IUCV_IPFGPID | 1179 IUCV_IPFGMID | IUCV_IPTRGCLS); 1180 rc = iucv_call_b2f0(IUCV_RECEIVE, parm); 1181 if (!rc || rc == 5) { 1182 msg->flags = parm->db.ipflags1; 1183 if (residual) 1184 *residual = parm->db.ipbfln1f; 1185 } 1186 out: 1187 return rc; 1188 } 1189 EXPORT_SYMBOL(__iucv_message_receive); 1190 1191 /** 1192 * iucv_message_receive 1193 * @path: address of iucv path structure 1194 * @msg: address of iucv msg structure 1195 * @flags: how the message is received (IUCV_IPBUFLST) 1196 * @buffer: address of data buffer or address of struct iucv_array 1197 * @size: length of data buffer 1198 * @residual: 1199 * 1200 * This function receives messages that are being sent to you over 1201 * established paths. This function will deal with RMDATA messages 1202 * embedded in struct iucv_message as well. 1203 * 1204 * Locking: local_bh_enable/local_bh_disable 1205 * 1206 * Returns the result from the CP IUCV call. 1207 */ 1208 int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, 1209 u8 flags, void *buffer, size_t size, size_t *residual) 1210 { 1211 int rc; 1212 1213 if (msg->flags & IUCV_IPRMDATA) 1214 return iucv_message_receive_iprmdata(path, msg, flags, 1215 buffer, size, residual); 1216 local_bh_disable(); 1217 rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); 1218 local_bh_enable(); 1219 return rc; 1220 } 1221 EXPORT_SYMBOL(iucv_message_receive); 1222 1223 /** 1224 * iucv_message_reject 1225 * @path: address of iucv path structure 1226 * @msg: address of iucv msg structure 1227 * 1228 * The reject function refuses a specified message. Between the time you 1229 * are notified of a message and the time that you complete the message, 1230 * the message may be rejected. 1231 * 1232 * Returns the result from the CP IUCV call. 1233 */ 1234 int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) 1235 { 1236 union iucv_param *parm; 1237 int rc; 1238 1239 local_bh_disable(); 1240 if (cpumask_empty(&iucv_buffer_cpumask)) { 1241 rc = -EIO; 1242 goto out; 1243 } 1244 parm = iucv_param[smp_processor_id()]; 1245 memset(parm, 0, sizeof(union iucv_param)); 1246 parm->db.ippathid = path->pathid; 1247 parm->db.ipmsgid = msg->id; 1248 parm->db.iptrgcls = msg->class; 1249 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); 1250 rc = iucv_call_b2f0(IUCV_REJECT, parm); 1251 out: 1252 local_bh_enable(); 1253 return rc; 1254 } 1255 EXPORT_SYMBOL(iucv_message_reject); 1256 1257 /** 1258 * iucv_message_reply 1259 * @path: address of iucv path structure 1260 * @msg: address of iucv msg structure 1261 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1262 * @reply: address of reply data buffer or address of struct iucv_array 1263 * @size: length of reply data buffer 1264 * 1265 * This function responds to the two-way messages that you receive. You 1266 * must identify completely the message to which you wish to reply. ie, 1267 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into 1268 * the parameter list. 1269 * 1270 * Returns the result from the CP IUCV call. 1271 */ 1272 int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, 1273 u8 flags, void *reply, size_t size) 1274 { 1275 union iucv_param *parm; 1276 int rc; 1277 1278 local_bh_disable(); 1279 if (cpumask_empty(&iucv_buffer_cpumask)) { 1280 rc = -EIO; 1281 goto out; 1282 } 1283 parm = iucv_param[smp_processor_id()]; 1284 memset(parm, 0, sizeof(union iucv_param)); 1285 if (flags & IUCV_IPRMDATA) { 1286 parm->dpl.ippathid = path->pathid; 1287 parm->dpl.ipflags1 = flags; 1288 parm->dpl.ipmsgid = msg->id; 1289 parm->dpl.iptrgcls = msg->class; 1290 memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); 1291 } else { 1292 parm->db.ipbfadr1 = (u32)(addr_t) reply; 1293 parm->db.ipbfln1f = (u32) size; 1294 parm->db.ippathid = path->pathid; 1295 parm->db.ipflags1 = flags; 1296 parm->db.ipmsgid = msg->id; 1297 parm->db.iptrgcls = msg->class; 1298 } 1299 rc = iucv_call_b2f0(IUCV_REPLY, parm); 1300 out: 1301 local_bh_enable(); 1302 return rc; 1303 } 1304 EXPORT_SYMBOL(iucv_message_reply); 1305 1306 /** 1307 * __iucv_message_send 1308 * @path: address of iucv path structure 1309 * @msg: address of iucv msg structure 1310 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1311 * @srccls: source class of message 1312 * @buffer: address of send buffer or address of struct iucv_array 1313 * @size: length of send buffer 1314 * 1315 * This function transmits data to another application. Data to be 1316 * transmitted is in a buffer and this is a one-way message and the 1317 * receiver will not reply to the message. 1318 * 1319 * Locking: no locking 1320 * 1321 * Returns the result from the CP IUCV call. 1322 */ 1323 int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1324 u8 flags, u32 srccls, void *buffer, size_t size) 1325 { 1326 union iucv_param *parm; 1327 int rc; 1328 1329 if (cpumask_empty(&iucv_buffer_cpumask)) { 1330 rc = -EIO; 1331 goto out; 1332 } 1333 parm = iucv_param[smp_processor_id()]; 1334 memset(parm, 0, sizeof(union iucv_param)); 1335 if (flags & IUCV_IPRMDATA) { 1336 /* Message of 8 bytes can be placed into the parameter list. */ 1337 parm->dpl.ippathid = path->pathid; 1338 parm->dpl.ipflags1 = flags | IUCV_IPNORPY; 1339 parm->dpl.iptrgcls = msg->class; 1340 parm->dpl.ipsrccls = srccls; 1341 parm->dpl.ipmsgtag = msg->tag; 1342 memcpy(parm->dpl.iprmmsg, buffer, 8); 1343 } else { 1344 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1345 parm->db.ipbfln1f = (u32) size; 1346 parm->db.ippathid = path->pathid; 1347 parm->db.ipflags1 = flags | IUCV_IPNORPY; 1348 parm->db.iptrgcls = msg->class; 1349 parm->db.ipsrccls = srccls; 1350 parm->db.ipmsgtag = msg->tag; 1351 } 1352 rc = iucv_call_b2f0(IUCV_SEND, parm); 1353 if (!rc) 1354 msg->id = parm->db.ipmsgid; 1355 out: 1356 return rc; 1357 } 1358 EXPORT_SYMBOL(__iucv_message_send); 1359 1360 /** 1361 * iucv_message_send 1362 * @path: address of iucv path structure 1363 * @msg: address of iucv msg structure 1364 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1365 * @srccls: source class of message 1366 * @buffer: address of send buffer or address of struct iucv_array 1367 * @size: length of send buffer 1368 * 1369 * This function transmits data to another application. Data to be 1370 * transmitted is in a buffer and this is a one-way message and the 1371 * receiver will not reply to the message. 1372 * 1373 * Locking: local_bh_enable/local_bh_disable 1374 * 1375 * Returns the result from the CP IUCV call. 1376 */ 1377 int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1378 u8 flags, u32 srccls, void *buffer, size_t size) 1379 { 1380 int rc; 1381 1382 local_bh_disable(); 1383 rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); 1384 local_bh_enable(); 1385 return rc; 1386 } 1387 EXPORT_SYMBOL(iucv_message_send); 1388 1389 /** 1390 * iucv_message_send2way 1391 * @path: address of iucv path structure 1392 * @msg: address of iucv msg structure 1393 * @flags: how the message is sent and the reply is received 1394 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) 1395 * @srccls: source class of message 1396 * @buffer: address of send buffer or address of struct iucv_array 1397 * @size: length of send buffer 1398 * @ansbuf: address of answer buffer or address of struct iucv_array 1399 * @asize: size of reply buffer 1400 * 1401 * This function transmits data to another application. Data to be 1402 * transmitted is in a buffer. The receiver of the send is expected to 1403 * reply to the message and a buffer is provided into which IUCV moves 1404 * the reply to this message. 1405 * 1406 * Returns the result from the CP IUCV call. 1407 */ 1408 int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, 1409 u8 flags, u32 srccls, void *buffer, size_t size, 1410 void *answer, size_t asize, size_t *residual) 1411 { 1412 union iucv_param *parm; 1413 int rc; 1414 1415 local_bh_disable(); 1416 if (cpumask_empty(&iucv_buffer_cpumask)) { 1417 rc = -EIO; 1418 goto out; 1419 } 1420 parm = iucv_param[smp_processor_id()]; 1421 memset(parm, 0, sizeof(union iucv_param)); 1422 if (flags & IUCV_IPRMDATA) { 1423 parm->dpl.ippathid = path->pathid; 1424 parm->dpl.ipflags1 = path->flags; /* priority message */ 1425 parm->dpl.iptrgcls = msg->class; 1426 parm->dpl.ipsrccls = srccls; 1427 parm->dpl.ipmsgtag = msg->tag; 1428 parm->dpl.ipbfadr2 = (u32)(addr_t) answer; 1429 parm->dpl.ipbfln2f = (u32) asize; 1430 memcpy(parm->dpl.iprmmsg, buffer, 8); 1431 } else { 1432 parm->db.ippathid = path->pathid; 1433 parm->db.ipflags1 = path->flags; /* priority message */ 1434 parm->db.iptrgcls = msg->class; 1435 parm->db.ipsrccls = srccls; 1436 parm->db.ipmsgtag = msg->tag; 1437 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1438 parm->db.ipbfln1f = (u32) size; 1439 parm->db.ipbfadr2 = (u32)(addr_t) answer; 1440 parm->db.ipbfln2f = (u32) asize; 1441 } 1442 rc = iucv_call_b2f0(IUCV_SEND, parm); 1443 if (!rc) 1444 msg->id = parm->db.ipmsgid; 1445 out: 1446 local_bh_enable(); 1447 return rc; 1448 } 1449 EXPORT_SYMBOL(iucv_message_send2way); 1450 1451 /** 1452 * iucv_path_pending 1453 * @data: Pointer to external interrupt buffer 1454 * 1455 * Process connection pending work item. Called from tasklet while holding 1456 * iucv_table_lock. 1457 */ 1458 struct iucv_path_pending { 1459 u16 ippathid; 1460 u8 ipflags1; 1461 u8 iptype; 1462 u16 ipmsglim; 1463 u16 res1; 1464 u8 ipvmid[8]; 1465 u8 ipuser[16]; 1466 u32 res3; 1467 u8 ippollfg; 1468 u8 res4[3]; 1469 } __packed; 1470 1471 static void iucv_path_pending(struct iucv_irq_data *data) 1472 { 1473 struct iucv_path_pending *ipp = (void *) data; 1474 struct iucv_handler *handler; 1475 struct iucv_path *path; 1476 char *error; 1477 1478 BUG_ON(iucv_path_table[ipp->ippathid]); 1479 /* New pathid, handler found. Create a new path struct. */ 1480 error = iucv_error_no_memory; 1481 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); 1482 if (!path) 1483 goto out_sever; 1484 path->pathid = ipp->ippathid; 1485 iucv_path_table[path->pathid] = path; 1486 EBCASC(ipp->ipvmid, 8); 1487 1488 /* Call registered handler until one is found that wants the path. */ 1489 list_for_each_entry(handler, &iucv_handler_list, list) { 1490 if (!handler->path_pending) 1491 continue; 1492 /* 1493 * Add path to handler to allow a call to iucv_path_sever 1494 * inside the path_pending function. If the handler returns 1495 * an error remove the path from the handler again. 1496 */ 1497 list_add(&path->list, &handler->paths); 1498 path->handler = handler; 1499 if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) 1500 return; 1501 list_del(&path->list); 1502 path->handler = NULL; 1503 } 1504 /* No handler wanted the path. */ 1505 iucv_path_table[path->pathid] = NULL; 1506 iucv_path_free(path); 1507 error = iucv_error_no_listener; 1508 out_sever: 1509 iucv_sever_pathid(ipp->ippathid, error); 1510 } 1511 1512 /** 1513 * iucv_path_complete 1514 * @data: Pointer to external interrupt buffer 1515 * 1516 * Process connection complete work item. Called from tasklet while holding 1517 * iucv_table_lock. 1518 */ 1519 struct iucv_path_complete { 1520 u16 ippathid; 1521 u8 ipflags1; 1522 u8 iptype; 1523 u16 ipmsglim; 1524 u16 res1; 1525 u8 res2[8]; 1526 u8 ipuser[16]; 1527 u32 res3; 1528 u8 ippollfg; 1529 u8 res4[3]; 1530 } __packed; 1531 1532 static void iucv_path_complete(struct iucv_irq_data *data) 1533 { 1534 struct iucv_path_complete *ipc = (void *) data; 1535 struct iucv_path *path = iucv_path_table[ipc->ippathid]; 1536 1537 if (path) 1538 path->flags = ipc->ipflags1; 1539 if (path && path->handler && path->handler->path_complete) 1540 path->handler->path_complete(path, ipc->ipuser); 1541 } 1542 1543 /** 1544 * iucv_path_severed 1545 * @data: Pointer to external interrupt buffer 1546 * 1547 * Process connection severed work item. Called from tasklet while holding 1548 * iucv_table_lock. 1549 */ 1550 struct iucv_path_severed { 1551 u16 ippathid; 1552 u8 res1; 1553 u8 iptype; 1554 u32 res2; 1555 u8 res3[8]; 1556 u8 ipuser[16]; 1557 u32 res4; 1558 u8 ippollfg; 1559 u8 res5[3]; 1560 } __packed; 1561 1562 static void iucv_path_severed(struct iucv_irq_data *data) 1563 { 1564 struct iucv_path_severed *ips = (void *) data; 1565 struct iucv_path *path = iucv_path_table[ips->ippathid]; 1566 1567 if (!path || !path->handler) /* Already severed */ 1568 return; 1569 if (path->handler->path_severed) 1570 path->handler->path_severed(path, ips->ipuser); 1571 else { 1572 iucv_sever_pathid(path->pathid, NULL); 1573 iucv_path_table[path->pathid] = NULL; 1574 list_del(&path->list); 1575 iucv_path_free(path); 1576 } 1577 } 1578 1579 /** 1580 * iucv_path_quiesced 1581 * @data: Pointer to external interrupt buffer 1582 * 1583 * Process connection quiesced work item. Called from tasklet while holding 1584 * iucv_table_lock. 1585 */ 1586 struct iucv_path_quiesced { 1587 u16 ippathid; 1588 u8 res1; 1589 u8 iptype; 1590 u32 res2; 1591 u8 res3[8]; 1592 u8 ipuser[16]; 1593 u32 res4; 1594 u8 ippollfg; 1595 u8 res5[3]; 1596 } __packed; 1597 1598 static void iucv_path_quiesced(struct iucv_irq_data *data) 1599 { 1600 struct iucv_path_quiesced *ipq = (void *) data; 1601 struct iucv_path *path = iucv_path_table[ipq->ippathid]; 1602 1603 if (path && path->handler && path->handler->path_quiesced) 1604 path->handler->path_quiesced(path, ipq->ipuser); 1605 } 1606 1607 /** 1608 * iucv_path_resumed 1609 * @data: Pointer to external interrupt buffer 1610 * 1611 * Process connection resumed work item. Called from tasklet while holding 1612 * iucv_table_lock. 1613 */ 1614 struct iucv_path_resumed { 1615 u16 ippathid; 1616 u8 res1; 1617 u8 iptype; 1618 u32 res2; 1619 u8 res3[8]; 1620 u8 ipuser[16]; 1621 u32 res4; 1622 u8 ippollfg; 1623 u8 res5[3]; 1624 } __packed; 1625 1626 static void iucv_path_resumed(struct iucv_irq_data *data) 1627 { 1628 struct iucv_path_resumed *ipr = (void *) data; 1629 struct iucv_path *path = iucv_path_table[ipr->ippathid]; 1630 1631 if (path && path->handler && path->handler->path_resumed) 1632 path->handler->path_resumed(path, ipr->ipuser); 1633 } 1634 1635 /** 1636 * iucv_message_complete 1637 * @data: Pointer to external interrupt buffer 1638 * 1639 * Process message complete work item. Called from tasklet while holding 1640 * iucv_table_lock. 1641 */ 1642 struct iucv_message_complete { 1643 u16 ippathid; 1644 u8 ipflags1; 1645 u8 iptype; 1646 u32 ipmsgid; 1647 u32 ipaudit; 1648 u8 iprmmsg[8]; 1649 u32 ipsrccls; 1650 u32 ipmsgtag; 1651 u32 res; 1652 u32 ipbfln2f; 1653 u8 ippollfg; 1654 u8 res2[3]; 1655 } __packed; 1656 1657 static void iucv_message_complete(struct iucv_irq_data *data) 1658 { 1659 struct iucv_message_complete *imc = (void *) data; 1660 struct iucv_path *path = iucv_path_table[imc->ippathid]; 1661 struct iucv_message msg; 1662 1663 if (path && path->handler && path->handler->message_complete) { 1664 msg.flags = imc->ipflags1; 1665 msg.id = imc->ipmsgid; 1666 msg.audit = imc->ipaudit; 1667 memcpy(msg.rmmsg, imc->iprmmsg, 8); 1668 msg.class = imc->ipsrccls; 1669 msg.tag = imc->ipmsgtag; 1670 msg.length = imc->ipbfln2f; 1671 path->handler->message_complete(path, &msg); 1672 } 1673 } 1674 1675 /** 1676 * iucv_message_pending 1677 * @data: Pointer to external interrupt buffer 1678 * 1679 * Process message pending work item. Called from tasklet while holding 1680 * iucv_table_lock. 1681 */ 1682 struct iucv_message_pending { 1683 u16 ippathid; 1684 u8 ipflags1; 1685 u8 iptype; 1686 u32 ipmsgid; 1687 u32 iptrgcls; 1688 union { 1689 u32 iprmmsg1_u32; 1690 u8 iprmmsg1[4]; 1691 } ln1msg1; 1692 union { 1693 u32 ipbfln1f; 1694 u8 iprmmsg2[4]; 1695 } ln1msg2; 1696 u32 res1[3]; 1697 u32 ipbfln2f; 1698 u8 ippollfg; 1699 u8 res2[3]; 1700 } __packed; 1701 1702 static void iucv_message_pending(struct iucv_irq_data *data) 1703 { 1704 struct iucv_message_pending *imp = (void *) data; 1705 struct iucv_path *path = iucv_path_table[imp->ippathid]; 1706 struct iucv_message msg; 1707 1708 if (path && path->handler && path->handler->message_pending) { 1709 msg.flags = imp->ipflags1; 1710 msg.id = imp->ipmsgid; 1711 msg.class = imp->iptrgcls; 1712 if (imp->ipflags1 & IUCV_IPRMDATA) { 1713 memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); 1714 msg.length = 8; 1715 } else 1716 msg.length = imp->ln1msg2.ipbfln1f; 1717 msg.reply_size = imp->ipbfln2f; 1718 path->handler->message_pending(path, &msg); 1719 } 1720 } 1721 1722 /** 1723 * iucv_tasklet_fn: 1724 * 1725 * This tasklet loops over the queue of irq buffers created by 1726 * iucv_external_interrupt, calls the appropriate action handler 1727 * and then frees the buffer. 1728 */ 1729 static void iucv_tasklet_fn(unsigned long ignored) 1730 { 1731 typedef void iucv_irq_fn(struct iucv_irq_data *); 1732 static iucv_irq_fn *irq_fn[] = { 1733 [0x02] = iucv_path_complete, 1734 [0x03] = iucv_path_severed, 1735 [0x04] = iucv_path_quiesced, 1736 [0x05] = iucv_path_resumed, 1737 [0x06] = iucv_message_complete, 1738 [0x07] = iucv_message_complete, 1739 [0x08] = iucv_message_pending, 1740 [0x09] = iucv_message_pending, 1741 }; 1742 LIST_HEAD(task_queue); 1743 struct iucv_irq_list *p, *n; 1744 1745 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ 1746 if (!spin_trylock(&iucv_table_lock)) { 1747 tasklet_schedule(&iucv_tasklet); 1748 return; 1749 } 1750 iucv_active_cpu = smp_processor_id(); 1751 1752 spin_lock_irq(&iucv_queue_lock); 1753 list_splice_init(&iucv_task_queue, &task_queue); 1754 spin_unlock_irq(&iucv_queue_lock); 1755 1756 list_for_each_entry_safe(p, n, &task_queue, list) { 1757 list_del_init(&p->list); 1758 irq_fn[p->data.iptype](&p->data); 1759 kfree(p); 1760 } 1761 1762 iucv_active_cpu = -1; 1763 spin_unlock(&iucv_table_lock); 1764 } 1765 1766 /** 1767 * iucv_work_fn: 1768 * 1769 * This work function loops over the queue of path pending irq blocks 1770 * created by iucv_external_interrupt, calls the appropriate action 1771 * handler and then frees the buffer. 1772 */ 1773 static void iucv_work_fn(struct work_struct *work) 1774 { 1775 LIST_HEAD(work_queue); 1776 struct iucv_irq_list *p, *n; 1777 1778 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ 1779 spin_lock_bh(&iucv_table_lock); 1780 iucv_active_cpu = smp_processor_id(); 1781 1782 spin_lock_irq(&iucv_queue_lock); 1783 list_splice_init(&iucv_work_queue, &work_queue); 1784 spin_unlock_irq(&iucv_queue_lock); 1785 1786 iucv_cleanup_queue(); 1787 list_for_each_entry_safe(p, n, &work_queue, list) { 1788 list_del_init(&p->list); 1789 iucv_path_pending(&p->data); 1790 kfree(p); 1791 } 1792 1793 iucv_active_cpu = -1; 1794 spin_unlock_bh(&iucv_table_lock); 1795 } 1796 1797 /** 1798 * iucv_external_interrupt 1799 * @code: irq code 1800 * 1801 * Handles external interrupts coming in from CP. 1802 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). 1803 */ 1804 static void iucv_external_interrupt(struct ext_code ext_code, 1805 unsigned int param32, unsigned long param64) 1806 { 1807 struct iucv_irq_data *p; 1808 struct iucv_irq_list *work; 1809 1810 inc_irq_stat(IRQEXT_IUC); 1811 p = iucv_irq_data[smp_processor_id()]; 1812 if (p->ippathid >= iucv_max_pathid) { 1813 WARN_ON(p->ippathid >= iucv_max_pathid); 1814 iucv_sever_pathid(p->ippathid, iucv_error_no_listener); 1815 return; 1816 } 1817 BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); 1818 work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); 1819 if (!work) { 1820 pr_warn("iucv_external_interrupt: out of memory\n"); 1821 return; 1822 } 1823 memcpy(&work->data, p, sizeof(work->data)); 1824 spin_lock(&iucv_queue_lock); 1825 if (p->iptype == 0x01) { 1826 /* Path pending interrupt. */ 1827 list_add_tail(&work->list, &iucv_work_queue); 1828 schedule_work(&iucv_work); 1829 } else { 1830 /* The other interrupts. */ 1831 list_add_tail(&work->list, &iucv_task_queue); 1832 tasklet_schedule(&iucv_tasklet); 1833 } 1834 spin_unlock(&iucv_queue_lock); 1835 } 1836 1837 static int iucv_pm_prepare(struct device *dev) 1838 { 1839 int rc = 0; 1840 1841 #ifdef CONFIG_PM_DEBUG 1842 printk(KERN_INFO "iucv_pm_prepare\n"); 1843 #endif 1844 if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) 1845 rc = dev->driver->pm->prepare(dev); 1846 return rc; 1847 } 1848 1849 static void iucv_pm_complete(struct device *dev) 1850 { 1851 #ifdef CONFIG_PM_DEBUG 1852 printk(KERN_INFO "iucv_pm_complete\n"); 1853 #endif 1854 if (dev->driver && dev->driver->pm && dev->driver->pm->complete) 1855 dev->driver->pm->complete(dev); 1856 } 1857 1858 /** 1859 * iucv_path_table_empty() - determine if iucv path table is empty 1860 * 1861 * Returns 0 if there are still iucv pathes defined 1862 * 1 if there are no iucv pathes defined 1863 */ 1864 static int iucv_path_table_empty(void) 1865 { 1866 int i; 1867 1868 for (i = 0; i < iucv_max_pathid; i++) { 1869 if (iucv_path_table[i]) 1870 return 0; 1871 } 1872 return 1; 1873 } 1874 1875 /** 1876 * iucv_pm_freeze() - Freeze PM callback 1877 * @dev: iucv-based device 1878 * 1879 * disable iucv interrupts 1880 * invoke callback function of the iucv-based driver 1881 * shut down iucv, if no iucv-pathes are established anymore 1882 */ 1883 static int iucv_pm_freeze(struct device *dev) 1884 { 1885 int cpu; 1886 struct iucv_irq_list *p, *n; 1887 int rc = 0; 1888 1889 #ifdef CONFIG_PM_DEBUG 1890 printk(KERN_WARNING "iucv_pm_freeze\n"); 1891 #endif 1892 if (iucv_pm_state != IUCV_PM_FREEZING) { 1893 for_each_cpu(cpu, &iucv_irq_cpumask) 1894 smp_call_function_single(cpu, iucv_block_cpu_almost, 1895 NULL, 1); 1896 cancel_work_sync(&iucv_work); 1897 list_for_each_entry_safe(p, n, &iucv_work_queue, list) { 1898 list_del_init(&p->list); 1899 iucv_sever_pathid(p->data.ippathid, 1900 iucv_error_no_listener); 1901 kfree(p); 1902 } 1903 } 1904 iucv_pm_state = IUCV_PM_FREEZING; 1905 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) 1906 rc = dev->driver->pm->freeze(dev); 1907 if (iucv_path_table_empty()) 1908 iucv_disable(); 1909 return rc; 1910 } 1911 1912 /** 1913 * iucv_pm_thaw() - Thaw PM callback 1914 * @dev: iucv-based device 1915 * 1916 * make iucv ready for use again: allocate path table, declare interrupt buffers 1917 * and enable iucv interrupts 1918 * invoke callback function of the iucv-based driver 1919 */ 1920 static int iucv_pm_thaw(struct device *dev) 1921 { 1922 int rc = 0; 1923 1924 #ifdef CONFIG_PM_DEBUG 1925 printk(KERN_WARNING "iucv_pm_thaw\n"); 1926 #endif 1927 iucv_pm_state = IUCV_PM_THAWING; 1928 if (!iucv_path_table) { 1929 rc = iucv_enable(); 1930 if (rc) 1931 goto out; 1932 } 1933 if (cpumask_empty(&iucv_irq_cpumask)) { 1934 if (iucv_nonsmp_handler) 1935 /* enable interrupts on one cpu */ 1936 iucv_allow_cpu(NULL); 1937 else 1938 /* enable interrupts on all cpus */ 1939 iucv_setmask_mp(); 1940 } 1941 if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) 1942 rc = dev->driver->pm->thaw(dev); 1943 out: 1944 return rc; 1945 } 1946 1947 /** 1948 * iucv_pm_restore() - Restore PM callback 1949 * @dev: iucv-based device 1950 * 1951 * make iucv ready for use again: allocate path table, declare interrupt buffers 1952 * and enable iucv interrupts 1953 * invoke callback function of the iucv-based driver 1954 */ 1955 static int iucv_pm_restore(struct device *dev) 1956 { 1957 int rc = 0; 1958 1959 #ifdef CONFIG_PM_DEBUG 1960 printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); 1961 #endif 1962 if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) 1963 pr_warn("Suspending Linux did not completely close all IUCV connections\n"); 1964 iucv_pm_state = IUCV_PM_RESTORING; 1965 if (cpumask_empty(&iucv_irq_cpumask)) { 1966 rc = iucv_query_maxconn(); 1967 rc = iucv_enable(); 1968 if (rc) 1969 goto out; 1970 } 1971 if (dev->driver && dev->driver->pm && dev->driver->pm->restore) 1972 rc = dev->driver->pm->restore(dev); 1973 out: 1974 return rc; 1975 } 1976 1977 struct iucv_interface iucv_if = { 1978 .message_receive = iucv_message_receive, 1979 .__message_receive = __iucv_message_receive, 1980 .message_reply = iucv_message_reply, 1981 .message_reject = iucv_message_reject, 1982 .message_send = iucv_message_send, 1983 .__message_send = __iucv_message_send, 1984 .message_send2way = iucv_message_send2way, 1985 .message_purge = iucv_message_purge, 1986 .path_accept = iucv_path_accept, 1987 .path_connect = iucv_path_connect, 1988 .path_quiesce = iucv_path_quiesce, 1989 .path_resume = iucv_path_resume, 1990 .path_sever = iucv_path_sever, 1991 .iucv_register = iucv_register, 1992 .iucv_unregister = iucv_unregister, 1993 .bus = NULL, 1994 .root = NULL, 1995 }; 1996 EXPORT_SYMBOL(iucv_if); 1997 1998 static enum cpuhp_state iucv_online; 1999 /** 2000 * iucv_init 2001 * 2002 * Allocates and initializes various data structures. 2003 */ 2004 static int __init iucv_init(void) 2005 { 2006 int rc; 2007 2008 if (!MACHINE_IS_VM) { 2009 rc = -EPROTONOSUPPORT; 2010 goto out; 2011 } 2012 ctl_set_bit(0, 1); 2013 rc = iucv_query_maxconn(); 2014 if (rc) 2015 goto out_ctl; 2016 rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); 2017 if (rc) 2018 goto out_ctl; 2019 iucv_root = root_device_register("iucv"); 2020 if (IS_ERR(iucv_root)) { 2021 rc = PTR_ERR(iucv_root); 2022 goto out_int; 2023 } 2024 2025 rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare", 2026 iucv_cpu_prepare, iucv_cpu_dead); 2027 if (rc) 2028 goto out_dev; 2029 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online", 2030 iucv_cpu_online, iucv_cpu_down_prep); 2031 if (rc < 0) 2032 goto out_prep; 2033 iucv_online = rc; 2034 2035 rc = register_reboot_notifier(&iucv_reboot_notifier); 2036 if (rc) 2037 goto out_remove_hp; 2038 ASCEBC(iucv_error_no_listener, 16); 2039 ASCEBC(iucv_error_no_memory, 16); 2040 ASCEBC(iucv_error_pathid, 16); 2041 iucv_available = 1; 2042 rc = bus_register(&iucv_bus); 2043 if (rc) 2044 goto out_reboot; 2045 iucv_if.root = iucv_root; 2046 iucv_if.bus = &iucv_bus; 2047 return 0; 2048 2049 out_reboot: 2050 unregister_reboot_notifier(&iucv_reboot_notifier); 2051 out_remove_hp: 2052 cpuhp_remove_state(iucv_online); 2053 out_prep: 2054 cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE); 2055 out_dev: 2056 root_device_unregister(iucv_root); 2057 out_int: 2058 unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); 2059 out_ctl: 2060 ctl_clear_bit(0, 1); 2061 out: 2062 return rc; 2063 } 2064 2065 /** 2066 * iucv_exit 2067 * 2068 * Frees everything allocated from iucv_init. 2069 */ 2070 static void __exit iucv_exit(void) 2071 { 2072 struct iucv_irq_list *p, *n; 2073 2074 spin_lock_irq(&iucv_queue_lock); 2075 list_for_each_entry_safe(p, n, &iucv_task_queue, list) 2076 kfree(p); 2077 list_for_each_entry_safe(p, n, &iucv_work_queue, list) 2078 kfree(p); 2079 spin_unlock_irq(&iucv_queue_lock); 2080 unregister_reboot_notifier(&iucv_reboot_notifier); 2081 2082 cpuhp_remove_state_nocalls(iucv_online); 2083 cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE); 2084 root_device_unregister(iucv_root); 2085 bus_unregister(&iucv_bus); 2086 unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); 2087 } 2088 2089 subsys_initcall(iucv_init); 2090 module_exit(iucv_exit); 2091 2092 MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); 2093 MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); 2094 MODULE_LICENSE("GPL"); 2095