/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static int xpc_mq_node = NUMA_NO_NODE;

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
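	/*
	 * The ia64 flavor has no per-queue IRQ allocation; the activate and
	 * notify queues use fixed SGI vectors, and the MMR is programmed
	 * below with the destination CPU and vector.
	 */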
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

}

static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were set up for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded in sending the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and also getting an error returned here would confuse them.
		 * Additionally, since in this case the channel is being
		 * disconnected we don't need to put the msg_slot back on the
		 * free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

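	/* partition activation / deactivation requests */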
	.request_partition_activation =
					xpc_request_partition_activation_uv,
	.request_partition_reactivation =
					xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
					xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
				xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

static int
xpc_init_mq_node(int nid)
{
	int cpu;

	get_online_cpus();

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_activate_mq_uv =
			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
					     XPC_ACTIVATE_IRQ_NAME,
					     xpc_handle_activate_IRQ_uv);
		if (!IS_ERR(xpc_activate_mq_uv))
			break;
	}
	if (IS_ERR(xpc_activate_mq_uv)) {
		put_online_cpus();
		return PTR_ERR(xpc_activate_mq_uv);
	}

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_notify_mq_uv =
			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
					     XPC_NOTIFY_IRQ_NAME,
					     xpc_handle_notify_IRQ_uv);
		if (!IS_ERR(xpc_notify_mq_uv))
			break;
	}
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		put_online_cpus();
		return PTR_ERR(xpc_notify_mq_uv);
	}

	put_online_cpus();
	return 0;
}

int
xpc_init_uv(void)
{
	int nid;
	int ret = 0;

	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	if (xpc_mq_node < 0)
		for_each_online_node(nid) {
			ret = xpc_init_mq_node(nid);

			if (!ret)
				break;
		}
	else
		ret = xpc_init_mq_node(xpc_mq_node);

	if (ret < 0)
		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
			-ret);

	return ret;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}

module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");