/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
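/*
 * Hook a GRU message queue up to an IRQ.  On x86_64 the MMR that targets
 * the interrupt is programmed for us by uv_setup_irq(); on ia64 the
 * SAL-reserved vectors (SGI_XPC_ACTIVATE/SGI_XPC_NOTIFY) are used and the
 * MMR is written directly.
 */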
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
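/*
 * Create (allocate, map and wire up) a GRU message queue: descriptor
 * allocation, node-local page allocation, watchlist (IRQ generation)
 * setup, IRQ registration, GRU mq creation and opening up the memory
 * protections.  The out_N error labels unwind these steps in reverse
 * order.
 */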
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
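/*
 * Send a message to another partition's GRU message queue.  Note that
 * MQE_QUEUE_FULL and MQE_CONGESTION are retried without bound; only an
 * unexpected error aborts the send.
 */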
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
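/*
 * All incoming activate messages funnel through this IRQ handler, which
 * drains the activate mq and wakes the heartbeat checker if any message
 * changed a partition's requested act_state.
 */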
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
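/*
 * On UV the heartbeat lives in memory the remote side reads directly with
 * xp_remote_memcpy() (see xpc_get_remote_heartbeat_uv()), so there is
 * presumably nothing to arm or disarm per partition; hence the
 * allow/disallow hooks below are no-ops.
 */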
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
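/*
 * Message slots are kept on simple singly-linked FIFOs protected by a
 * spinlock; n_entries is tracked so the number of deliverable payloads
 * can be reported without walking the list.
 */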
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
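/*
 * If the full complement of msg slots can't be had in one contiguous
 * allocation, fall back to progressively fewer entries, shrinking
 * ch->local_nentries (or ch->remote_nentries) to match what was actually
 * allocated.
 */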
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that was set up for the
 * specified channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
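/*
 * Whoever NULLs msg_slot->func with cmpxchg() "owns" the notify callout;
 * this keeps the ack path and the disconnect path from both invoking the
 * caller's xpc_notify_func for the same message.
 */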
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
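/*
 * A notify_mq message is a header followed by the user's payload; the
 * receiver later echoes hdr.msg_slot_number back in a zero-size message
 * to ack the slot (see xpc_received_payload_uv() below).
 */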
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field.  If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded to send the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and returning an error here as well would confuse them.
		 * Additionally, since in this case the channel is being
		 * disconnected we don't need to put the msg_slot back on the
		 * free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
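/*
 * The common XPC code dispatches through this ops table; xpc_init_uv()
 * installs it as xpc_arch_ops so the rest of the driver stays
 * architecture-agnostic.
 */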
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}