/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

}

static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded to send the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and returning an error here as well would confuse them.
		 * Additionally, since in this case the channel is being
		 * disconnected we don't need to put the msg_slot back on the
		 * free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation = xpc_request_partition_activation_uv,
	.request_partition_reactivation =
	    xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
	    xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}