/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original source:
 *		Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000
 *		Xenia Tkatschow (xenia@us.ibm.com)
 *	2Gb awareness and general cleanup:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Rewritten for af_iucv:
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	PM functions:
 *		Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *	The original source
 *	CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
static enum iucv_pm_states iucv_pm_state;

static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_allow_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = (unsigned long)parm;
	/*
	 * B2F0 is the IUCV instruction: the command code is expected in
	 * general register 0, the parameter-block address in register 1;
	 * the condition code is extracted from the PSW afterwards.
	 */
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return ccode;
}

static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	int ccode;

	ccode = __iucv_call_b2f0(command, parm);
	return ccode == 1 ? parm->ctrl.iprcode : ccode;
}

/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established
 * and stores it in iucv_max_pathid.
 *
 * Returns 0 on success, -ENOMEM if the parameter block could not be
 * allocated, or -EPERM if IUCV is not available.
 */
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	*max_pathid = reg1;
	return ccode;
}

static int iucv_query_maxconn(void)
{
	unsigned long max_pathid;
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
	if (!param)
		return -ENOMEM;
	ccode = __iucv_query_maxconn(param, &max_pathid);
	if (ccode == 0)
		iucv_max_pathid = max_pathid;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}

/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	get_online_cpus();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}

/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}

static void free_iucv_data(int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
}

static int alloc_iucv_data(int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					   GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	free_iucv_data(cpu);
	return -ENOMEM;
}

static int iucv_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (alloc_iucv_data(cpu))
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_iucv_data(cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!iucv_path_table)
			break;
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!iucv_path_table)
			break;
		cpumask_copy(&cpumask, &iucv_buffer_cpumask);
		cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			/* Can't offline last IUCV enabled cpu. */
			return notifier_from_errno(-EINVAL);
		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
		if (cpumask_empty(&iucv_irq_cpumask))
			smp_call_function_single(
				cpumask_first(&iucv_buffer_cpumask),
				iucv_allow_cpu, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata iucv_cpu_notifier = {
	.notifier_call = iucv_cpu_notify,
};

/**
 * iucv_sever_pathid
 * @pathid: path identification number.
 * @userdata: 16 bytes of user data.
 *
 * Sever an iucv path to free up the pathid. Used internally.
 */
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on a iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/**
 * iucv_register:
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Registers a driver with IUCV.
 *
 * Returns 0 on success, -ENOSYS if IUCV is not available, -ENOMEM if the
 * memory allocation for the pathid table failed, or -EIO if
 * IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);
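
/*
 * Usage sketch (illustrative only, not built as part of this file; the
 * example_* names are hypothetical): a minimal registration as an
 * IUCV-based driver might do it.
 *
 *	static struct iucv_handler example_handler = {
 *		.path_pending	 = example_path_pending,
 *		.path_complete	 = example_path_complete,
 *		.path_severed	 = example_path_severed,
 *		.message_pending = example_message_pending,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return iucv_register(&example_handler, 1);
 *	}
 *
 * Passing smp == 1 declares that the callbacks cope with interrupt
 * delivery on any cpu; smp == 0 makes the core restrict iucv interrupt
 * delivery to a single cpu (see iucv_setmask_up above).
 */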

/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregisters a driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);

static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	get_online_cpus();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	put_online_cpus();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 *userdata, void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);

/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 *userid, u8 *system, u8 *userdata,
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);
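
/*
 * Usage sketch (illustrative only, not part of this file): connecting
 * to a hypothetical peer guest "LNXPEER". The user id is padded to
 * eight characters; iucv_path_connect performs the ASCII-to-EBCDIC
 * conversion itself.
 *
 *	struct iucv_path *path;
 *	int rc;
 *
 *	path = iucv_path_alloc(10, 0, GFP_KERNEL);
 *	if (!path)
 *		return -ENOMEM;
 *	rc = iucv_path_connect(path, &example_handler, "LNXPEER ",
 *			       NULL, NULL, NULL);
 *	if (rc)
 *		iucv_path_free(path);
 *
 * The path only becomes usable once the handler's path_complete
 * callback has been invoked.
 */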

/**
 * iucv_path_quiesce:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking the iucv_path_resume
 * function.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/**
 * iucv_path_resume:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/**
 * iucv_path_sever
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function terminates an IUCV path.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

/**
 * iucv_message_purge
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Cancels a message you have sent.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/**
 * iucv_message_receive_iprmdata
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: if non-NULL, receives the residual byte count
 *
 * Internal function used by iucv_message_receive and __iucv_message_receive
 * to receive RMDATA data stored in a struct iucv_message.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer,
					 size_t size, size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			copy = min_t(size_t, size, array->length);
			memcpy((u8 *)(addr_t) array->address,
			       rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}
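
/*
 * Sketch of an IUCV_IPBUFLST buffer list (illustrative only): instead
 * of one flat buffer, @buffer points at an array of struct iucv_array
 * elements (defined in <net/iucv/iucv.h>), each naming an address and
 * a length, and @size covers the sum of the element lengths. The
 * hdr_buf/pay_buf names are hypothetical.
 *
 *	struct iucv_array iba[2] = {
 *		{ .address = (u32)(addr_t) hdr_buf, .length = hdr_len },
 *		{ .address = (u32)(addr_t) pay_buf, .length = pay_len },
 *	};
 *
 *	rc = iucv_message_receive(path, msg, IUCV_IPBUFLST, iba,
 *				  hdr_len + pay_len, NULL);
 */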

/**
 * __iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: if non-NULL, receives the residual byte count
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking:	no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: if non-NULL, receives the residual byte count
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);
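
/*
 * Usage sketch (illustrative only): receiving a pending message from
 * the handler's message_pending callback. The callback name and buffer
 * are hypothetical.
 *
 *	static void example_message_pending(struct iucv_path *path,
 *					    struct iucv_message *msg)
 *	{
 *		u8 recv_buf[256];
 *		size_t left = 0;
 *		int rc;
 *
 *		rc = iucv_message_receive(path, msg, 0, recv_buf,
 *					  min_t(size_t, msg->length,
 *						sizeof(recv_buf)), &left);
 *	}
 */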

/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must completely identify the message to which you wish to reply, i.e.
 * pathid, msgid and trgcls. The IUCV_IPRMDATA flag signifies that the
 * reply data is moved into the parameter list.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);
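
/*
 * Usage sketch (illustrative only): answering a two-way message with a
 * short reply carried in the parameter list (IUCV_IPRMDATA), so no
 * separate reply buffer transfer is needed for up to 8 bytes. This
 * generally requires the path to have been set up with IUCV_IPRMDATA.
 *
 *	rc = iucv_message_reply(path, msg, IUCV_IPRMDATA, "OK", 2);
 */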

/**
 * __iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. This is a one-way message: the receiver
 * will not reply to it.
 *
 * Locking:	no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. This is a one-way message: the receiver
 * will not reply to it.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);
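
/*
 * Usage sketch (illustrative only): a one-way send of a buffer; the
 * data_buf/data_len names are hypothetical. The tag is echoed back in
 * the message_complete callback and lets the sender match completions.
 *
 *	struct iucv_message msg = {
 *		.class	= 0,
 *		.tag	= 0x42,
 *	};
 *
 *	rc = iucv_message_send(path, &msg, 0, 0, data_buf, data_len);
 */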

/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @answer: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 * @residual: currently unused
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. The receiver of the send is expected to
 * reply to the message and a buffer is provided into which IUCV moves
 * the reply to this message.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, min_t(size_t, size, 8));
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = (u32)(addr_t) answer;
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);
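
/*
 * Usage sketch (illustrative only): a two-way send. The reply is not
 * available when this call returns; IUCV fills the answer buffer later
 * and the message_complete callback signals that the answer arrived.
 * The req_buf/ans_buf names are hypothetical.
 *
 *	struct iucv_message msg = { .tag = 0x43 };
 *
 *	rc = iucv_message_send2way(path, &msg, 0, 0, req_buf, req_len,
 *				   ans_buf, ans_len, NULL);
 */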

/**
 * iucv_path_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process connection pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	EBCASC(ipp->ipvmid, 8);

	/* Call the registered handlers until one wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	iucv_sever_pathid(ipp->ippathid, error);
}

/**
 * iucv_path_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process connection complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  res2[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	if (path)
		path->flags = ipc->ipflags1;
	if (path && path->handler && path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}

/**
 * iucv_path_severed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection severed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_severed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	if (!path || !path->handler)	/* Already severed */
		return;
	if (path->handler->path_severed)
		path->handler->path_severed(path, ips->ipuser);
	else {
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del(&path->list);
		iucv_path_free(path);
	}
}

/**
 * iucv_path_quiesced
 * @data: Pointer to external interrupt buffer
 *
 * Process connection quiesced work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_quiesced {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	if (path && path->handler && path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}

/**
 * iucv_path_resumed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection resumed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
struct iucv_path_resumed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	if (path && path->handler && path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}
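
/*
 * Usage sketch (illustrative only): accepting an incoming connection
 * from a hypothetical path_pending callback. Returning 0 keeps the
 * path; any other return value passes it on to the next registered
 * handler (see iucv_path_pending above).
 *
 *	static int example_path_pending(struct iucv_path *path, u8 *ipvmid,
 *					u8 *ipuser)
 *	{
 *		return iucv_path_accept(path, &example_handler, NULL, NULL);
 *	}
 */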
1673 */ 1674 struct iucv_message_complete { 1675 u16 ippathid; 1676 u8 ipflags1; 1677 u8 iptype; 1678 u32 ipmsgid; 1679 u32 ipaudit; 1680 u8 iprmmsg[8]; 1681 u32 ipsrccls; 1682 u32 ipmsgtag; 1683 u32 res; 1684 u32 ipbfln2f; 1685 u8 ippollfg; 1686 u8 res2[3]; 1687 } __packed; 1688 1689 static void iucv_message_complete(struct iucv_irq_data *data) 1690 { 1691 struct iucv_message_complete *imc = (void *) data; 1692 struct iucv_path *path = iucv_path_table[imc->ippathid]; 1693 struct iucv_message msg; 1694 1695 if (path && path->handler && path->handler->message_complete) { 1696 msg.flags = imc->ipflags1; 1697 msg.id = imc->ipmsgid; 1698 msg.audit = imc->ipaudit; 1699 memcpy(msg.rmmsg, imc->iprmmsg, 8); 1700 msg.class = imc->ipsrccls; 1701 msg.tag = imc->ipmsgtag; 1702 msg.length = imc->ipbfln2f; 1703 path->handler->message_complete(path, &msg); 1704 } 1705 } 1706 1707 /** 1708 * iucv_message_pending 1709 * @data: Pointer to external interrupt buffer 1710 * 1711 * Process message pending work item. Called from tasklet while holding 1712 * iucv_table_lock. 1713 */ 1714 struct iucv_message_pending { 1715 u16 ippathid; 1716 u8 ipflags1; 1717 u8 iptype; 1718 u32 ipmsgid; 1719 u32 iptrgcls; 1720 union { 1721 u32 iprmmsg1_u32; 1722 u8 iprmmsg1[4]; 1723 } ln1msg1; 1724 union { 1725 u32 ipbfln1f; 1726 u8 iprmmsg2[4]; 1727 } ln1msg2; 1728 u32 res1[3]; 1729 u32 ipbfln2f; 1730 u8 ippollfg; 1731 u8 res2[3]; 1732 } __packed; 1733 1734 static void iucv_message_pending(struct iucv_irq_data *data) 1735 { 1736 struct iucv_message_pending *imp = (void *) data; 1737 struct iucv_path *path = iucv_path_table[imp->ippathid]; 1738 struct iucv_message msg; 1739 1740 if (path && path->handler && path->handler->message_pending) { 1741 msg.flags = imp->ipflags1; 1742 msg.id = imp->ipmsgid; 1743 msg.class = imp->iptrgcls; 1744 if (imp->ipflags1 & IUCV_IPRMDATA) { 1745 memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); 1746 msg.length = 8; 1747 } else 1748 msg.length = imp->ln1msg2.ipbfln1f; 1749 msg.reply_size = imp->ipbfln2f; 1750 path->handler->message_pending(path, &msg); 1751 } 1752 } 1753 1754 /** 1755 * iucv_tasklet_fn: 1756 * 1757 * This tasklet loops over the queue of irq buffers created by 1758 * iucv_external_interrupt, calls the appropriate action handler 1759 * and then frees the buffer. 1760 */ 1761 static void iucv_tasklet_fn(unsigned long ignored) 1762 { 1763 typedef void iucv_irq_fn(struct iucv_irq_data *); 1764 static iucv_irq_fn *irq_fn[] = { 1765 [0x02] = iucv_path_complete, 1766 [0x03] = iucv_path_severed, 1767 [0x04] = iucv_path_quiesced, 1768 [0x05] = iucv_path_resumed, 1769 [0x06] = iucv_message_complete, 1770 [0x07] = iucv_message_complete, 1771 [0x08] = iucv_message_pending, 1772 [0x09] = iucv_message_pending, 1773 }; 1774 LIST_HEAD(task_queue); 1775 struct iucv_irq_list *p, *n; 1776 1777 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. 
	if (!spin_trylock(&iucv_table_lock)) {
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/**
 * iucv_work_fn:
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/**
 * iucv_external_interrupt
 * @ext_code: irq code
 * @param32: unused
 * @param64: unused
 *
 * Handles external interrupts coming in from CP.
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
static void iucv_external_interrupt(struct ext_code ext_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	inc_irq_stat(IRQEXT_IUC);
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warn("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

static int iucv_pm_prepare(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_prepare\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
		rc = dev->driver->pm->prepare(dev);
	return rc;
}

static void iucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_complete\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
		dev->driver->pm->complete(dev);
}

/**
 * iucv_path_table_empty() - determine if iucv path table is empty
 *
 * Returns 0 if there are still iucv paths defined
 *	   1 if there are no iucv paths defined
 */
int iucv_path_table_empty(void)
{
	int i;

	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			return 0;
	}
	return 1;
}

/**
 * iucv_pm_freeze() - Freeze PM callback
 * @dev: iucv-based device
 *
 * disable iucv interrupts
 * invoke callback function of the iucv-based driver
 * shut down iucv, if no iucv paths are established anymore
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	struct iucv_irq_list *p, *n;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	if (iucv_pm_state != IUCV_PM_FREEZING) {
		for_each_cpu(cpu, &iucv_irq_cpumask)
			smp_call_function_single(cpu, iucv_block_cpu_almost,
						 NULL, 1);
		cancel_work_sync(&iucv_work);
		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
			list_del_init(&p->list);
			iucv_sever_pathid(p->data.ippathid,
					  iucv_error_no_listener);
			kfree(p);
		}
	}
	iucv_pm_state = IUCV_PM_FREEZING;
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}

/**
 * iucv_pm_thaw() - Thaw PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt buffers
 * and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_thaw(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
	iucv_pm_state = IUCV_PM_THAWING;
	if (!iucv_path_table) {
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (cpumask_empty(&iucv_irq_cpumask)) {
		if (iucv_nonsmp_handler)
			/* enable interrupts on one cpu */
			iucv_allow_cpu(NULL);
		else
			/* enable interrupts on all cpus */
			iucv_setmask_mp();
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
		rc = dev->driver->pm->thaw(dev);
out:
	return rc;
}

/**
 * iucv_pm_restore() - Restore PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt buffers
 * and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_restore(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
#endif
	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
		pr_warn("Suspending Linux did not completely close all IUCV connections\n");
	iucv_pm_state = IUCV_PM_RESTORING;
	if (cpumask_empty(&iucv_irq_cpumask)) {
		rc = iucv_query_maxconn();
		if (rc)
			goto out;
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
		rc = dev->driver->pm->restore(dev);
out:
	return rc;
}

struct iucv_interface iucv_if = {
	.message_receive = iucv_message_receive,
	.__message_receive = __iucv_message_receive,
	.message_reply = iucv_message_reply,
	.message_reject = iucv_message_reject,
	.message_send = iucv_message_send,
	.__message_send = __iucv_message_send,
	.message_send2way = iucv_message_send2way,
	.message_purge = iucv_message_purge,
	.path_accept = iucv_path_accept,
	.path_connect = iucv_path_connect,
	.path_quiesce = iucv_path_quiesce,
	.path_resume = iucv_path_resume,
	.path_sever = iucv_path_sever,
	.iucv_register = iucv_register,
	.iucv_unregister = iucv_unregister,
	.bus = NULL,
	.root = NULL,
};
EXPORT_SYMBOL(iucv_if);

/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 */
static int __init iucv_init(void)
{
	int rc;
	int cpu;

	if (!MACHINE_IS_VM) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	ctl_set_bit(0, 1);
	rc = iucv_query_maxconn();
	if (rc)
		goto out_ctl;
	rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
	if (rc)
		goto out_ctl;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		if (alloc_iucv_data(cpu)) {
			rc = -ENOMEM;
			goto out_free;
		}
	}
	rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
	if (rc)
		goto out_free;

	cpu_notifier_register_done();

	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_cpu;
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	iucv_if.root = iucv_root;
	iucv_if.bus = &iucv_bus;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_cpu:
	cpu_notifier_register_begin();
	__unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
	for_each_possible_cpu(cpu)
		free_iucv_data(cpu);

	cpu_notifier_register_done();

	root_device_unregister(iucv_root);
out_int:
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
	ctl_clear_bit(0, 1);
out:
	return rc;
}

/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;
	int cpu;

	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);
	cpu_notifier_register_begin();
	__unregister_hotcpu_notifier(&iucv_cpu_notifier);
	for_each_possible_cpu(cpu)
		free_iucv_data(cpu);
	cpu_notifier_register_done();
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");