1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. 7 */ 8 9 /* 10 * Cross Partition Communication (XPC) uv-based functions. 11 * 12 * Architecture specific implementation of common functions. 13 * 14 */ 15 16 #include <linux/kernel.h> 17 #include <linux/mm.h> 18 #include <linux/interrupt.h> 19 #include <linux/delay.h> 20 #include <linux/device.h> 21 #include <linux/err.h> 22 #include <asm/uv/uv_hub.h> 23 #if defined CONFIG_X86_64 24 #include <asm/uv/bios.h> 25 #include <asm/uv/uv_irq.h> 26 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 27 #include <asm/sn/intr.h> 28 #include <asm/sn/sn_sal.h> 29 #endif 30 #include "../sgi-gru/gru.h" 31 #include "../sgi-gru/grukservices.h" 32 #include "xpc.h" 33 34 #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 35 struct uv_IO_APIC_route_entry { 36 __u64 vector : 8, 37 delivery_mode : 3, 38 dest_mode : 1, 39 delivery_status : 1, 40 polarity : 1, 41 __reserved_1 : 1, 42 trigger : 1, 43 mask : 1, 44 __reserved_2 : 15, 45 dest : 32; 46 }; 47 #endif 48 49 static struct xpc_heartbeat_uv *xpc_heartbeat_uv; 50 51 #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) 52 #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 53 XPC_ACTIVATE_MSG_SIZE_UV) 54 #define XPC_ACTIVATE_IRQ_NAME "xpc_activate" 55 56 #define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) 57 #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 58 XPC_NOTIFY_MSG_SIZE_UV) 59 #define XPC_NOTIFY_IRQ_NAME "xpc_notify" 60 61 static struct xpc_gru_mq_uv *xpc_activate_mq_uv; 62 static struct xpc_gru_mq_uv *xpc_notify_mq_uv; 63 64 static int 65 xpc_setup_partitions_uv(void) 66 { 67 short partid; 68 struct xpc_partition_uv *part_uv; 69 70 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { 71 part_uv = &xpc_partitions[partid].sn.uv; 72 73 mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex); 74 spin_lock_init(&part_uv->flags_lock); 75 part_uv->remote_act_state = XPC_P_AS_INACTIVE; 76 } 77 return 0; 78 } 79 80 static void 81 xpc_teardown_partitions_uv(void) 82 { 83 short partid; 84 struct xpc_partition_uv *part_uv; 85 unsigned long irq_flags; 86 87 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { 88 part_uv = &xpc_partitions[partid].sn.uv; 89 90 if (part_uv->cached_activate_gru_mq_desc != NULL) { 91 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); 92 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 93 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; 94 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 95 kfree(part_uv->cached_activate_gru_mq_desc); 96 part_uv->cached_activate_gru_mq_desc = NULL; 97 mutex_unlock(&part_uv-> 98 cached_activate_gru_mq_desc_mutex); 99 } 100 } 101 } 102 103 static int 104 xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) 105 { 106 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 107 108 #if defined CONFIG_X86_64 109 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); 110 if (mq->irq < 0) { 111 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", 112 -mq->irq); 113 return mq->irq; 114 } 115 116 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); 117 118 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 119 if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) 120 mq->irq = SGI_XPC_ACTIVATE; 121 else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) 122 mq->irq = SGI_XPC_NOTIFY; 123 else 124 return -EINVAL; 125 126 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; 127 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); 128 #else 129 #error not a supported configuration 130 #endif 131 132 return 0; 133 } 134 135 static void 136 xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) 137 { 138 #if defined CONFIG_X86_64 139 uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); 140 141 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 142 int mmr_pnode; 143 unsigned long mmr_value; 144 145 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 146 mmr_value = 1UL << 16; 147 148 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); 149 #else 150 #error not a supported configuration 151 #endif 152 } 153 154 static int 155 xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) 156 { 157 int ret; 158 159 #if defined CONFIG_X86_64 160 ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), 161 mq->order, &mq->mmr_offset); 162 if (ret < 0) { 163 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " 164 "ret=%d\n", ret); 165 return ret; 166 } 167 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 168 ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address), 169 mq->order, &mq->mmr_offset); 170 if (ret < 0) { 171 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", 172 ret); 173 return -EBUSY; 174 } 175 #else 176 #error not a supported configuration 177 #endif 178 179 mq->watchlist_num = ret; 180 return 0; 181 } 182 183 static void 184 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) 185 { 186 int ret; 187 188 #if defined CONFIG_X86_64 189 ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 190 BUG_ON(ret != BIOS_STATUS_SUCCESS); 191 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 192 ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 193 BUG_ON(ret != SALRET_OK); 194 #else 195 #error not a supported configuration 196 #endif 197 } 198 199 static struct xpc_gru_mq_uv * 200 xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, 201 irq_handler_t irq_handler) 202 { 203 enum xp_retval xp_ret; 204 int ret; 205 int nid; 206 int pg_order; 207 struct page *page; 208 struct xpc_gru_mq_uv *mq; 209 struct uv_IO_APIC_route_entry *mmr_value; 210 211 mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); 212 if (mq == NULL) { 213 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " 214 "a xpc_gru_mq_uv structure\n"); 215 ret = -ENOMEM; 216 goto out_0; 217 } 218 219 mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc), 220 GFP_KERNEL); 221 if (mq->gru_mq_desc == NULL) { 222 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " 223 "a gru_message_queue_desc structure\n"); 224 ret = -ENOMEM; 225 goto out_1; 226 } 227 228 pg_order = get_order(mq_size); 229 mq->order = pg_order + PAGE_SHIFT; 230 mq_size = 1UL << mq->order; 231 232 mq->mmr_blade = uv_cpu_to_blade_id(cpu); 233 234 nid = cpu_to_node(cpu); 235 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 236 pg_order); 237 if (page == NULL) { 238 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 239 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); 240 ret = -ENOMEM; 241 goto out_2; 242 } 243 mq->address = page_address(page); 244 245 /* enable generation of irq when GRU mq operation occurs to this mq */ 246 ret = xpc_gru_mq_watchlist_alloc_uv(mq); 247 if (ret != 0) 248 goto out_3; 249 250 ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name); 251 if (ret != 0) 252 goto out_4; 253 254 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); 255 if (ret != 0) { 256 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", 257 mq->irq, -ret); 258 goto out_5; 259 } 260 261 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; 262 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, 263 nid, mmr_value->vector, mmr_value->dest); 264 if (ret != 0) { 265 dev_err(xpc_part, "gru_create_message_queue() returned " 266 "error=%d\n", ret); 267 ret = -EINVAL; 268 goto out_6; 269 } 270 271 /* allow other partitions to access this GRU mq */ 272 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); 273 if (xp_ret != xpSuccess) { 274 ret = -EACCES; 275 goto out_6; 276 } 277 278 return mq; 279 280 /* something went wrong */ 281 out_6: 282 free_irq(mq->irq, NULL); 283 out_5: 284 xpc_release_gru_mq_irq_uv(mq); 285 out_4: 286 xpc_gru_mq_watchlist_free_uv(mq); 287 out_3: 288 free_pages((unsigned long)mq->address, pg_order); 289 out_2: 290 kfree(mq->gru_mq_desc); 291 out_1: 292 kfree(mq); 293 out_0: 294 return ERR_PTR(ret); 295 } 296 297 static void 298 xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) 299 { 300 unsigned int mq_size; 301 int pg_order; 302 int ret; 303 304 /* disallow other partitions to access GRU mq */ 305 mq_size = 1UL << mq->order; 306 ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); 307 BUG_ON(ret != xpSuccess); 308 309 /* unregister irq handler and release mq irq/vector mapping */ 310 free_irq(mq->irq, NULL); 311 xpc_release_gru_mq_irq_uv(mq); 312 313 /* disable generation of irq when GRU mq op occurs to this mq */ 314 xpc_gru_mq_watchlist_free_uv(mq); 315 316 pg_order = mq->order - PAGE_SHIFT; 317 free_pages((unsigned long)mq->address, pg_order); 318 319 kfree(mq); 320 } 321 322 static enum xp_retval 323 xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg, 324 size_t msg_size) 325 { 326 enum xp_retval xp_ret; 327 int ret; 328 329 while (1) { 330 ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size); 331 if (ret == MQE_OK) { 332 xp_ret = xpSuccess; 333 break; 334 } 335 336 if (ret == MQE_QUEUE_FULL) { 337 dev_dbg(xpc_chan, "gru_send_message_gpa() returned " 338 "error=MQE_QUEUE_FULL\n"); 339 /* !!! handle QLimit reached; delay & try again */ 340 /* ??? Do we add a limit to the number of retries? */ 341 (void)msleep_interruptible(10); 342 } else if (ret == MQE_CONGESTION) { 343 dev_dbg(xpc_chan, "gru_send_message_gpa() returned " 344 "error=MQE_CONGESTION\n"); 345 /* !!! handle LB Overflow; simply try again */ 346 /* ??? Do we add a limit to the number of retries? */ 347 } else { 348 /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */ 349 dev_err(xpc_chan, "gru_send_message_gpa() returned " 350 "error=%d\n", ret); 351 xp_ret = xpGruSendMqError; 352 break; 353 } 354 } 355 return xp_ret; 356 } 357 358 static void 359 xpc_process_activate_IRQ_rcvd_uv(void) 360 { 361 unsigned long irq_flags; 362 short partid; 363 struct xpc_partition *part; 364 u8 act_state_req; 365 366 DBUG_ON(xpc_activate_IRQ_rcvd == 0); 367 368 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 369 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { 370 part = &xpc_partitions[partid]; 371 372 if (part->sn.uv.act_state_req == 0) 373 continue; 374 375 xpc_activate_IRQ_rcvd--; 376 BUG_ON(xpc_activate_IRQ_rcvd < 0); 377 378 act_state_req = part->sn.uv.act_state_req; 379 part->sn.uv.act_state_req = 0; 380 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 381 382 if (act_state_req == XPC_P_ASR_ACTIVATE_UV) { 383 if (part->act_state == XPC_P_AS_INACTIVE) 384 xpc_activate_partition(part); 385 else if (part->act_state == XPC_P_AS_DEACTIVATING) 386 XPC_DEACTIVATE_PARTITION(part, xpReactivating); 387 388 } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) { 389 if (part->act_state == XPC_P_AS_INACTIVE) 390 xpc_activate_partition(part); 391 else 392 XPC_DEACTIVATE_PARTITION(part, xpReactivating); 393 394 } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) { 395 XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason); 396 397 } else { 398 BUG(); 399 } 400 401 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 402 if (xpc_activate_IRQ_rcvd == 0) 403 break; 404 } 405 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 406 407 } 408 409 static void 410 xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, 411 struct xpc_activate_mq_msghdr_uv *msg_hdr, 412 int *wakeup_hb_checker) 413 { 414 unsigned long irq_flags; 415 struct xpc_partition_uv *part_uv = &part->sn.uv; 416 struct xpc_openclose_args *args; 417 418 part_uv->remote_act_state = msg_hdr->act_state; 419 420 switch (msg_hdr->type) { 421 case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: 422 /* syncing of remote_act_state was just done above */ 423 break; 424 425 case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { 426 struct xpc_activate_mq_msg_activate_req_uv *msg; 427 428 /* 429 * ??? Do we deal here with ts_jiffies being different 430 * ??? if act_state != XPC_P_AS_INACTIVE instead of 431 * ??? below? 432 */ 433 msg = container_of(msg_hdr, struct 434 xpc_activate_mq_msg_activate_req_uv, hdr); 435 436 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 437 if (part_uv->act_state_req == 0) 438 xpc_activate_IRQ_rcvd++; 439 part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; 440 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ 441 part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; 442 part_uv->heartbeat_gpa = msg->heartbeat_gpa; 443 444 if (msg->activate_gru_mq_desc_gpa != 445 part_uv->activate_gru_mq_desc_gpa) { 446 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 447 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; 448 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 449 part_uv->activate_gru_mq_desc_gpa = 450 msg->activate_gru_mq_desc_gpa; 451 } 452 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 453 454 (*wakeup_hb_checker)++; 455 break; 456 } 457 case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { 458 struct xpc_activate_mq_msg_deactivate_req_uv *msg; 459 460 msg = container_of(msg_hdr, struct 461 xpc_activate_mq_msg_deactivate_req_uv, hdr); 462 463 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 464 if (part_uv->act_state_req == 0) 465 xpc_activate_IRQ_rcvd++; 466 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; 467 part_uv->reason = msg->reason; 468 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 469 470 (*wakeup_hb_checker)++; 471 return; 472 } 473 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { 474 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; 475 476 msg = container_of(msg_hdr, struct 477 xpc_activate_mq_msg_chctl_closerequest_uv, 478 hdr); 479 args = &part->remote_openclose_args[msg->ch_number]; 480 args->reason = msg->reason; 481 482 spin_lock_irqsave(&part->chctl_lock, irq_flags); 483 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; 484 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 485 486 xpc_wakeup_channel_mgr(part); 487 break; 488 } 489 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { 490 struct xpc_activate_mq_msg_chctl_closereply_uv *msg; 491 492 msg = container_of(msg_hdr, struct 493 xpc_activate_mq_msg_chctl_closereply_uv, 494 hdr); 495 496 spin_lock_irqsave(&part->chctl_lock, irq_flags); 497 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; 498 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 499 500 xpc_wakeup_channel_mgr(part); 501 break; 502 } 503 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { 504 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; 505 506 msg = container_of(msg_hdr, struct 507 xpc_activate_mq_msg_chctl_openrequest_uv, 508 hdr); 509 args = &part->remote_openclose_args[msg->ch_number]; 510 args->entry_size = msg->entry_size; 511 args->local_nentries = msg->local_nentries; 512 513 spin_lock_irqsave(&part->chctl_lock, irq_flags); 514 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; 515 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 516 517 xpc_wakeup_channel_mgr(part); 518 break; 519 } 520 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { 521 struct xpc_activate_mq_msg_chctl_openreply_uv *msg; 522 523 msg = container_of(msg_hdr, struct 524 xpc_activate_mq_msg_chctl_openreply_uv, hdr); 525 args = &part->remote_openclose_args[msg->ch_number]; 526 args->remote_nentries = msg->remote_nentries; 527 args->local_nentries = msg->local_nentries; 528 args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa; 529 530 spin_lock_irqsave(&part->chctl_lock, irq_flags); 531 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; 532 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 533 534 xpc_wakeup_channel_mgr(part); 535 break; 536 } 537 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { 538 struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; 539 540 msg = container_of(msg_hdr, struct 541 xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); 542 spin_lock_irqsave(&part->chctl_lock, irq_flags); 543 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; 544 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 545 546 xpc_wakeup_channel_mgr(part); 547 } 548 case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: 549 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 550 part_uv->flags |= XPC_P_ENGAGED_UV; 551 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 552 break; 553 554 case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: 555 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 556 part_uv->flags &= ~XPC_P_ENGAGED_UV; 557 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 558 break; 559 560 default: 561 dev_err(xpc_part, "received unknown activate_mq msg type=%d " 562 "from partition=%d\n", msg_hdr->type, XPC_PARTID(part)); 563 564 /* get hb checker to deactivate from the remote partition */ 565 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 566 if (part_uv->act_state_req == 0) 567 xpc_activate_IRQ_rcvd++; 568 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; 569 part_uv->reason = xpBadMsgType; 570 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 571 572 (*wakeup_hb_checker)++; 573 return; 574 } 575 576 if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && 577 part->remote_rp_ts_jiffies != 0) { 578 /* 579 * ??? Does what we do here need to be sensitive to 580 * ??? act_state or remote_act_state? 581 */ 582 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 583 if (part_uv->act_state_req == 0) 584 xpc_activate_IRQ_rcvd++; 585 part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; 586 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 587 588 (*wakeup_hb_checker)++; 589 } 590 } 591 592 static irqreturn_t 593 xpc_handle_activate_IRQ_uv(int irq, void *dev_id) 594 { 595 struct xpc_activate_mq_msghdr_uv *msg_hdr; 596 short partid; 597 struct xpc_partition *part; 598 int wakeup_hb_checker = 0; 599 int part_referenced; 600 601 while (1) { 602 msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc); 603 if (msg_hdr == NULL) 604 break; 605 606 partid = msg_hdr->partid; 607 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { 608 dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() " 609 "received invalid partid=0x%x in message\n", 610 partid); 611 } else { 612 part = &xpc_partitions[partid]; 613 614 part_referenced = xpc_part_ref(part); 615 xpc_handle_activate_mq_msg_uv(part, msg_hdr, 616 &wakeup_hb_checker); 617 if (part_referenced) 618 xpc_part_deref(part); 619 } 620 621 gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr); 622 } 623 624 if (wakeup_hb_checker) 625 wake_up_interruptible(&xpc_activate_IRQ_wq); 626 627 return IRQ_HANDLED; 628 } 629 630 static enum xp_retval 631 xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc, 632 unsigned long gru_mq_desc_gpa) 633 { 634 enum xp_retval ret; 635 636 ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa, 637 sizeof(struct gru_message_queue_desc)); 638 if (ret == xpSuccess) 639 gru_mq_desc->mq = NULL; 640 641 return ret; 642 } 643 644 static enum xp_retval 645 xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, 646 int msg_type) 647 { 648 struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; 649 struct xpc_partition_uv *part_uv = &part->sn.uv; 650 struct gru_message_queue_desc *gru_mq_desc; 651 unsigned long irq_flags; 652 enum xp_retval ret; 653 654 DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); 655 656 msg_hdr->type = msg_type; 657 msg_hdr->partid = xp_partition_id; 658 msg_hdr->act_state = part->act_state; 659 msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; 660 661 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); 662 again: 663 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) { 664 gru_mq_desc = part_uv->cached_activate_gru_mq_desc; 665 if (gru_mq_desc == NULL) { 666 gru_mq_desc = kmalloc(sizeof(struct 667 gru_message_queue_desc), 668 GFP_KERNEL); 669 if (gru_mq_desc == NULL) { 670 ret = xpNoMemory; 671 goto done; 672 } 673 part_uv->cached_activate_gru_mq_desc = gru_mq_desc; 674 } 675 676 ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc, 677 part_uv-> 678 activate_gru_mq_desc_gpa); 679 if (ret != xpSuccess) 680 goto done; 681 682 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 683 part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; 684 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 685 } 686 687 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */ 688 ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg, 689 msg_size); 690 if (ret != xpSuccess) { 691 smp_rmb(); /* ensure a fresh copy of part_uv->flags */ 692 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) 693 goto again; 694 } 695 done: 696 mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex); 697 return ret; 698 } 699 700 static void 701 xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg, 702 size_t msg_size, int msg_type) 703 { 704 enum xp_retval ret; 705 706 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); 707 if (unlikely(ret != xpSuccess)) 708 XPC_DEACTIVATE_PARTITION(part, ret); 709 } 710 711 static void 712 xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, 713 void *msg, size_t msg_size, int msg_type) 714 { 715 struct xpc_partition *part = &xpc_partitions[ch->partid]; 716 enum xp_retval ret; 717 718 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); 719 if (unlikely(ret != xpSuccess)) { 720 if (irq_flags != NULL) 721 spin_unlock_irqrestore(&ch->lock, *irq_flags); 722 723 XPC_DEACTIVATE_PARTITION(part, ret); 724 725 if (irq_flags != NULL) 726 spin_lock_irqsave(&ch->lock, *irq_flags); 727 } 728 } 729 730 static void 731 xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) 732 { 733 unsigned long irq_flags; 734 struct xpc_partition_uv *part_uv = &part->sn.uv; 735 736 /* 737 * !!! Make our side think that the remote partition sent an activate 738 * !!! mq message our way by doing what the activate IRQ handler would 739 * !!! do had one really been sent. 740 */ 741 742 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 743 if (part_uv->act_state_req == 0) 744 xpc_activate_IRQ_rcvd++; 745 part_uv->act_state_req = act_state_req; 746 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 747 748 wake_up_interruptible(&xpc_activate_IRQ_wq); 749 } 750 751 static enum xp_retval 752 xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, 753 size_t *len) 754 { 755 s64 status; 756 enum xp_retval ret; 757 758 #if defined CONFIG_X86_64 759 status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa, 760 (u64 *)len); 761 if (status == BIOS_STATUS_SUCCESS) 762 ret = xpSuccess; 763 else if (status == BIOS_STATUS_MORE_PASSES) 764 ret = xpNeedMoreInfo; 765 else 766 ret = xpBiosError; 767 768 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 769 status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); 770 if (status == SALRET_OK) 771 ret = xpSuccess; 772 else if (status == SALRET_MORE_PASSES) 773 ret = xpNeedMoreInfo; 774 else 775 ret = xpSalError; 776 777 #else 778 #error not a supported configuration 779 #endif 780 781 return ret; 782 } 783 784 static int 785 xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp) 786 { 787 xpc_heartbeat_uv = 788 &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat; 789 rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv); 790 rp->sn.uv.activate_gru_mq_desc_gpa = 791 uv_gpa(xpc_activate_mq_uv->gru_mq_desc); 792 return 0; 793 } 794 795 static void 796 xpc_allow_hb_uv(short partid) 797 { 798 } 799 800 static void 801 xpc_disallow_hb_uv(short partid) 802 { 803 } 804 805 static void 806 xpc_disallow_all_hbs_uv(void) 807 { 808 } 809 810 static void 811 xpc_increment_heartbeat_uv(void) 812 { 813 xpc_heartbeat_uv->value++; 814 } 815 816 static void 817 xpc_offline_heartbeat_uv(void) 818 { 819 xpc_increment_heartbeat_uv(); 820 xpc_heartbeat_uv->offline = 1; 821 } 822 823 static void 824 xpc_online_heartbeat_uv(void) 825 { 826 xpc_increment_heartbeat_uv(); 827 xpc_heartbeat_uv->offline = 0; 828 } 829 830 static void 831 xpc_heartbeat_init_uv(void) 832 { 833 xpc_heartbeat_uv->value = 1; 834 xpc_heartbeat_uv->offline = 0; 835 } 836 837 static void 838 xpc_heartbeat_exit_uv(void) 839 { 840 xpc_offline_heartbeat_uv(); 841 } 842 843 static enum xp_retval 844 xpc_get_remote_heartbeat_uv(struct xpc_partition *part) 845 { 846 struct xpc_partition_uv *part_uv = &part->sn.uv; 847 enum xp_retval ret; 848 849 ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat), 850 part_uv->heartbeat_gpa, 851 sizeof(struct xpc_heartbeat_uv)); 852 if (ret != xpSuccess) 853 return ret; 854 855 if (part_uv->cached_heartbeat.value == part->last_heartbeat && 856 !part_uv->cached_heartbeat.offline) { 857 858 ret = xpNoHeartbeat; 859 } else { 860 part->last_heartbeat = part_uv->cached_heartbeat.value; 861 } 862 return ret; 863 } 864 865 static void 866 xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, 867 unsigned long remote_rp_gpa, int nasid) 868 { 869 short partid = remote_rp->SAL_partid; 870 struct xpc_partition *part = &xpc_partitions[partid]; 871 struct xpc_activate_mq_msg_activate_req_uv msg; 872 873 part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ 874 part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; 875 part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; 876 part->sn.uv.activate_gru_mq_desc_gpa = 877 remote_rp->sn.uv.activate_gru_mq_desc_gpa; 878 879 /* 880 * ??? Is it a good idea to make this conditional on what is 881 * ??? potentially stale state information? 882 */ 883 if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { 884 msg.rp_gpa = uv_gpa(xpc_rsvd_page); 885 msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; 886 msg.activate_gru_mq_desc_gpa = 887 xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; 888 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 889 XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); 890 } 891 892 if (part->act_state == XPC_P_AS_INACTIVE) 893 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); 894 } 895 896 static void 897 xpc_request_partition_reactivation_uv(struct xpc_partition *part) 898 { 899 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); 900 } 901 902 static void 903 xpc_request_partition_deactivation_uv(struct xpc_partition *part) 904 { 905 struct xpc_activate_mq_msg_deactivate_req_uv msg; 906 907 /* 908 * ??? Is it a good idea to make this conditional on what is 909 * ??? potentially stale state information? 910 */ 911 if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING && 912 part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) { 913 914 msg.reason = part->reason; 915 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 916 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV); 917 } 918 } 919 920 static void 921 xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part) 922 { 923 /* nothing needs to be done */ 924 return; 925 } 926 927 static void 928 xpc_init_fifo_uv(struct xpc_fifo_head_uv *head) 929 { 930 head->first = NULL; 931 head->last = NULL; 932 spin_lock_init(&head->lock); 933 head->n_entries = 0; 934 } 935 936 static void * 937 xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) 938 { 939 unsigned long irq_flags; 940 struct xpc_fifo_entry_uv *first; 941 942 spin_lock_irqsave(&head->lock, irq_flags); 943 first = head->first; 944 if (head->first != NULL) { 945 head->first = first->next; 946 if (head->first == NULL) 947 head->last = NULL; 948 } 949 head->n_entries--; 950 BUG_ON(head->n_entries < 0); 951 spin_unlock_irqrestore(&head->lock, irq_flags); 952 first->next = NULL; 953 return first; 954 } 955 956 static void 957 xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head, 958 struct xpc_fifo_entry_uv *last) 959 { 960 unsigned long irq_flags; 961 962 last->next = NULL; 963 spin_lock_irqsave(&head->lock, irq_flags); 964 if (head->last != NULL) 965 head->last->next = last; 966 else 967 head->first = last; 968 head->last = last; 969 head->n_entries++; 970 spin_unlock_irqrestore(&head->lock, irq_flags); 971 } 972 973 static int 974 xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) 975 { 976 return head->n_entries; 977 } 978 979 /* 980 * Setup the channel structures that are uv specific. 981 */ 982 static enum xp_retval 983 xpc_setup_ch_structures_uv(struct xpc_partition *part) 984 { 985 struct xpc_channel_uv *ch_uv; 986 int ch_number; 987 988 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 989 ch_uv = &part->channels[ch_number].sn.uv; 990 991 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); 992 xpc_init_fifo_uv(&ch_uv->recv_msg_list); 993 } 994 995 return xpSuccess; 996 } 997 998 /* 999 * Teardown the channel structures that are uv specific. 1000 */ 1001 static void 1002 xpc_teardown_ch_structures_uv(struct xpc_partition *part) 1003 { 1004 /* nothing needs to be done */ 1005 return; 1006 } 1007 1008 static enum xp_retval 1009 xpc_make_first_contact_uv(struct xpc_partition *part) 1010 { 1011 struct xpc_activate_mq_msg_uv msg; 1012 1013 /* 1014 * We send a sync msg to get the remote partition's remote_act_state 1015 * updated to our current act_state which at this point should 1016 * be XPC_P_AS_ACTIVATING. 1017 */ 1018 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 1019 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); 1020 1021 while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { 1022 1023 dev_dbg(xpc_part, "waiting to make first contact with " 1024 "partition %d\n", XPC_PARTID(part)); 1025 1026 /* wait a 1/4 of a second or so */ 1027 (void)msleep_interruptible(250); 1028 1029 if (part->act_state == XPC_P_AS_DEACTIVATING) 1030 return part->reason; 1031 } 1032 1033 return xpSuccess; 1034 } 1035 1036 static u64 1037 xpc_get_chctl_all_flags_uv(struct xpc_partition *part) 1038 { 1039 unsigned long irq_flags; 1040 union xpc_channel_ctl_flags chctl; 1041 1042 spin_lock_irqsave(&part->chctl_lock, irq_flags); 1043 chctl = part->chctl; 1044 if (chctl.all_flags != 0) 1045 part->chctl.all_flags = 0; 1046 1047 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 1048 return chctl.all_flags; 1049 } 1050 1051 static enum xp_retval 1052 xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch) 1053 { 1054 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1055 struct xpc_send_msg_slot_uv *msg_slot; 1056 unsigned long irq_flags; 1057 int nentries; 1058 int entry; 1059 size_t nbytes; 1060 1061 for (nentries = ch->local_nentries; nentries > 0; nentries--) { 1062 nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv); 1063 ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL); 1064 if (ch_uv->send_msg_slots == NULL) 1065 continue; 1066 1067 for (entry = 0; entry < nentries; entry++) { 1068 msg_slot = &ch_uv->send_msg_slots[entry]; 1069 1070 msg_slot->msg_slot_number = entry; 1071 xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list, 1072 &msg_slot->next); 1073 } 1074 1075 spin_lock_irqsave(&ch->lock, irq_flags); 1076 if (nentries < ch->local_nentries) 1077 ch->local_nentries = nentries; 1078 spin_unlock_irqrestore(&ch->lock, irq_flags); 1079 return xpSuccess; 1080 } 1081 1082 return xpNoMemory; 1083 } 1084 1085 static enum xp_retval 1086 xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch) 1087 { 1088 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1089 struct xpc_notify_mq_msg_uv *msg_slot; 1090 unsigned long irq_flags; 1091 int nentries; 1092 int entry; 1093 size_t nbytes; 1094 1095 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { 1096 nbytes = nentries * ch->entry_size; 1097 ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL); 1098 if (ch_uv->recv_msg_slots == NULL) 1099 continue; 1100 1101 for (entry = 0; entry < nentries; entry++) { 1102 msg_slot = ch_uv->recv_msg_slots + 1103 entry * ch->entry_size; 1104 1105 msg_slot->hdr.msg_slot_number = entry; 1106 } 1107 1108 spin_lock_irqsave(&ch->lock, irq_flags); 1109 if (nentries < ch->remote_nentries) 1110 ch->remote_nentries = nentries; 1111 spin_unlock_irqrestore(&ch->lock, irq_flags); 1112 return xpSuccess; 1113 } 1114 1115 return xpNoMemory; 1116 } 1117 1118 /* 1119 * Allocate msg_slots associated with the channel. 1120 */ 1121 static enum xp_retval 1122 xpc_setup_msg_structures_uv(struct xpc_channel *ch) 1123 { 1124 static enum xp_retval ret; 1125 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1126 1127 DBUG_ON(ch->flags & XPC_C_SETUP); 1128 1129 ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct 1130 gru_message_queue_desc), 1131 GFP_KERNEL); 1132 if (ch_uv->cached_notify_gru_mq_desc == NULL) 1133 return xpNoMemory; 1134 1135 ret = xpc_allocate_send_msg_slot_uv(ch); 1136 if (ret == xpSuccess) { 1137 1138 ret = xpc_allocate_recv_msg_slot_uv(ch); 1139 if (ret != xpSuccess) { 1140 kfree(ch_uv->send_msg_slots); 1141 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); 1142 } 1143 } 1144 return ret; 1145 } 1146 1147 /* 1148 * Free up msg_slots and clear other stuff that were setup for the specified 1149 * channel. 1150 */ 1151 static void 1152 xpc_teardown_msg_structures_uv(struct xpc_channel *ch) 1153 { 1154 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1155 1156 DBUG_ON(!spin_is_locked(&ch->lock)); 1157 1158 kfree(ch_uv->cached_notify_gru_mq_desc); 1159 ch_uv->cached_notify_gru_mq_desc = NULL; 1160 1161 if (ch->flags & XPC_C_SETUP) { 1162 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); 1163 kfree(ch_uv->send_msg_slots); 1164 xpc_init_fifo_uv(&ch_uv->recv_msg_list); 1165 kfree(ch_uv->recv_msg_slots); 1166 } 1167 } 1168 1169 static void 1170 xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) 1171 { 1172 struct xpc_activate_mq_msg_chctl_closerequest_uv msg; 1173 1174 msg.ch_number = ch->number; 1175 msg.reason = ch->reason; 1176 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1177 XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV); 1178 } 1179 1180 static void 1181 xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags) 1182 { 1183 struct xpc_activate_mq_msg_chctl_closereply_uv msg; 1184 1185 msg.ch_number = ch->number; 1186 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1187 XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV); 1188 } 1189 1190 static void 1191 xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) 1192 { 1193 struct xpc_activate_mq_msg_chctl_openrequest_uv msg; 1194 1195 msg.ch_number = ch->number; 1196 msg.entry_size = ch->entry_size; 1197 msg.local_nentries = ch->local_nentries; 1198 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1199 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV); 1200 } 1201 1202 static void 1203 xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) 1204 { 1205 struct xpc_activate_mq_msg_chctl_openreply_uv msg; 1206 1207 msg.ch_number = ch->number; 1208 msg.local_nentries = ch->local_nentries; 1209 msg.remote_nentries = ch->remote_nentries; 1210 msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc); 1211 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1212 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); 1213 } 1214 1215 static void 1216 xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags) 1217 { 1218 struct xpc_activate_mq_msg_chctl_opencomplete_uv msg; 1219 1220 msg.ch_number = ch->number; 1221 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1222 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV); 1223 } 1224 1225 static void 1226 xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) 1227 { 1228 unsigned long irq_flags; 1229 1230 spin_lock_irqsave(&part->chctl_lock, irq_flags); 1231 part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST; 1232 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 1233 1234 xpc_wakeup_channel_mgr(part); 1235 } 1236 1237 static enum xp_retval 1238 xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, 1239 unsigned long gru_mq_desc_gpa) 1240 { 1241 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1242 1243 DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL); 1244 return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc, 1245 gru_mq_desc_gpa); 1246 } 1247 1248 static void 1249 xpc_indicate_partition_engaged_uv(struct xpc_partition *part) 1250 { 1251 struct xpc_activate_mq_msg_uv msg; 1252 1253 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 1254 XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV); 1255 } 1256 1257 static void 1258 xpc_indicate_partition_disengaged_uv(struct xpc_partition *part) 1259 { 1260 struct xpc_activate_mq_msg_uv msg; 1261 1262 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 1263 XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV); 1264 } 1265 1266 static void 1267 xpc_assume_partition_disengaged_uv(short partid) 1268 { 1269 struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv; 1270 unsigned long irq_flags; 1271 1272 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); 1273 part_uv->flags &= ~XPC_P_ENGAGED_UV; 1274 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); 1275 } 1276 1277 static int 1278 xpc_partition_engaged_uv(short partid) 1279 { 1280 return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0; 1281 } 1282 1283 static int 1284 xpc_any_partition_engaged_uv(void) 1285 { 1286 struct xpc_partition_uv *part_uv; 1287 short partid; 1288 1289 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { 1290 part_uv = &xpc_partitions[partid].sn.uv; 1291 if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0) 1292 return 1; 1293 } 1294 return 0; 1295 } 1296 1297 static enum xp_retval 1298 xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags, 1299 struct xpc_send_msg_slot_uv **address_of_msg_slot) 1300 { 1301 enum xp_retval ret; 1302 struct xpc_send_msg_slot_uv *msg_slot; 1303 struct xpc_fifo_entry_uv *entry; 1304 1305 while (1) { 1306 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); 1307 if (entry != NULL) 1308 break; 1309 1310 if (flags & XPC_NOWAIT) 1311 return xpNoWait; 1312 1313 ret = xpc_allocate_msg_wait(ch); 1314 if (ret != xpInterrupted && ret != xpTimeout) 1315 return ret; 1316 } 1317 1318 msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next); 1319 *address_of_msg_slot = msg_slot; 1320 return xpSuccess; 1321 } 1322 1323 static void 1324 xpc_free_msg_slot_uv(struct xpc_channel *ch, 1325 struct xpc_send_msg_slot_uv *msg_slot) 1326 { 1327 xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); 1328 1329 /* wakeup anyone waiting for a free msg slot */ 1330 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) 1331 wake_up(&ch->msg_allocate_wq); 1332 } 1333 1334 static void 1335 xpc_notify_sender_uv(struct xpc_channel *ch, 1336 struct xpc_send_msg_slot_uv *msg_slot, 1337 enum xp_retval reason) 1338 { 1339 xpc_notify_func func = msg_slot->func; 1340 1341 if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) { 1342 1343 atomic_dec(&ch->n_to_notify); 1344 1345 dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p " 1346 "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, 1347 msg_slot->msg_slot_number, ch->partid, ch->number); 1348 1349 func(reason, ch->partid, ch->number, msg_slot->key); 1350 1351 dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p " 1352 "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, 1353 msg_slot->msg_slot_number, ch->partid, ch->number); 1354 } 1355 } 1356 1357 static void 1358 xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, 1359 struct xpc_notify_mq_msg_uv *msg) 1360 { 1361 struct xpc_send_msg_slot_uv *msg_slot; 1362 int entry = msg->hdr.msg_slot_number % ch->local_nentries; 1363 1364 msg_slot = &ch->sn.uv.send_msg_slots[entry]; 1365 1366 BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); 1367 msg_slot->msg_slot_number += ch->local_nentries; 1368 1369 if (msg_slot->func != NULL) 1370 xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered); 1371 1372 xpc_free_msg_slot_uv(ch, msg_slot); 1373 } 1374 1375 static void 1376 xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, 1377 struct xpc_notify_mq_msg_uv *msg) 1378 { 1379 struct xpc_partition_uv *part_uv = &part->sn.uv; 1380 struct xpc_channel *ch; 1381 struct xpc_channel_uv *ch_uv; 1382 struct xpc_notify_mq_msg_uv *msg_slot; 1383 unsigned long irq_flags; 1384 int ch_number = msg->hdr.ch_number; 1385 1386 if (unlikely(ch_number >= part->nchannels)) { 1387 dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid " 1388 "channel number=0x%x in message from partid=%d\n", 1389 ch_number, XPC_PARTID(part)); 1390 1391 /* get hb checker to deactivate from the remote partition */ 1392 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 1393 if (part_uv->act_state_req == 0) 1394 xpc_activate_IRQ_rcvd++; 1395 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; 1396 part_uv->reason = xpBadChannelNumber; 1397 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 1398 1399 wake_up_interruptible(&xpc_activate_IRQ_wq); 1400 return; 1401 } 1402 1403 ch = &part->channels[ch_number]; 1404 xpc_msgqueue_ref(ch); 1405 1406 if (!(ch->flags & XPC_C_CONNECTED)) { 1407 xpc_msgqueue_deref(ch); 1408 return; 1409 } 1410 1411 /* see if we're really dealing with an ACK for a previously sent msg */ 1412 if (msg->hdr.size == 0) { 1413 xpc_handle_notify_mq_ack_uv(ch, msg); 1414 xpc_msgqueue_deref(ch); 1415 return; 1416 } 1417 1418 /* we're dealing with a normal message sent via the notify_mq */ 1419 ch_uv = &ch->sn.uv; 1420 1421 msg_slot = ch_uv->recv_msg_slots + 1422 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; 1423 1424 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); 1425 BUG_ON(msg_slot->hdr.size != 0); 1426 1427 memcpy(msg_slot, msg, msg->hdr.size); 1428 1429 xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next); 1430 1431 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { 1432 /* 1433 * If there is an existing idle kthread get it to deliver 1434 * the payload, otherwise we'll have to get the channel mgr 1435 * for this partition to create a kthread to do the delivery. 1436 */ 1437 if (atomic_read(&ch->kthreads_idle) > 0) 1438 wake_up_nr(&ch->idle_wq, 1); 1439 else 1440 xpc_send_chctl_local_msgrequest_uv(part, ch->number); 1441 } 1442 xpc_msgqueue_deref(ch); 1443 } 1444 1445 static irqreturn_t 1446 xpc_handle_notify_IRQ_uv(int irq, void *dev_id) 1447 { 1448 struct xpc_notify_mq_msg_uv *msg; 1449 short partid; 1450 struct xpc_partition *part; 1451 1452 while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) != 1453 NULL) { 1454 1455 partid = msg->hdr.partid; 1456 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { 1457 dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received " 1458 "invalid partid=0x%x in message\n", partid); 1459 } else { 1460 part = &xpc_partitions[partid]; 1461 1462 if (xpc_part_ref(part)) { 1463 xpc_handle_notify_mq_msg_uv(part, msg); 1464 xpc_part_deref(part); 1465 } 1466 } 1467 1468 gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg); 1469 } 1470 1471 return IRQ_HANDLED; 1472 } 1473 1474 static int 1475 xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch) 1476 { 1477 return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); 1478 } 1479 1480 static void 1481 xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number) 1482 { 1483 struct xpc_channel *ch = &part->channels[ch_number]; 1484 int ndeliverable_payloads; 1485 1486 xpc_msgqueue_ref(ch); 1487 1488 ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch); 1489 1490 if (ndeliverable_payloads > 0 && 1491 (ch->flags & XPC_C_CONNECTED) && 1492 (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { 1493 1494 xpc_activate_kthreads(ch, ndeliverable_payloads); 1495 } 1496 1497 xpc_msgqueue_deref(ch); 1498 } 1499 1500 static enum xp_retval 1501 xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, 1502 u16 payload_size, u8 notify_type, xpc_notify_func func, 1503 void *key) 1504 { 1505 enum xp_retval ret = xpSuccess; 1506 struct xpc_send_msg_slot_uv *msg_slot = NULL; 1507 struct xpc_notify_mq_msg_uv *msg; 1508 u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV]; 1509 size_t msg_size; 1510 1511 DBUG_ON(notify_type != XPC_N_CALL); 1512 1513 msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size; 1514 if (msg_size > ch->entry_size) 1515 return xpPayloadTooBig; 1516 1517 xpc_msgqueue_ref(ch); 1518 1519 if (ch->flags & XPC_C_DISCONNECTING) { 1520 ret = ch->reason; 1521 goto out_1; 1522 } 1523 if (!(ch->flags & XPC_C_CONNECTED)) { 1524 ret = xpNotConnected; 1525 goto out_1; 1526 } 1527 1528 ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); 1529 if (ret != xpSuccess) 1530 goto out_1; 1531 1532 if (func != NULL) { 1533 atomic_inc(&ch->n_to_notify); 1534 1535 msg_slot->key = key; 1536 smp_wmb(); /* a non-NULL func must hit memory after the key */ 1537 msg_slot->func = func; 1538 1539 if (ch->flags & XPC_C_DISCONNECTING) { 1540 ret = ch->reason; 1541 goto out_2; 1542 } 1543 } 1544 1545 msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; 1546 msg->hdr.partid = xp_partition_id; 1547 msg->hdr.ch_number = ch->number; 1548 msg->hdr.size = msg_size; 1549 msg->hdr.msg_slot_number = msg_slot->msg_slot_number; 1550 memcpy(&msg->payload, payload, payload_size); 1551 1552 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, 1553 msg_size); 1554 if (ret == xpSuccess) 1555 goto out_1; 1556 1557 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1558 out_2: 1559 if (func != NULL) { 1560 /* 1561 * Try to NULL the msg_slot's func field. If we fail, then 1562 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which 1563 * case we need to pretend we succeeded to send the message 1564 * since the user will get a callout for the disconnect error 1565 * by xpc_notify_senders_of_disconnect_uv(), and to also get an 1566 * error returned here will confuse them. Additionally, since 1567 * in this case the channel is being disconnected we don't need 1568 * to put the the msg_slot back on the free list. 1569 */ 1570 if (cmpxchg(&msg_slot->func, func, NULL) != func) { 1571 ret = xpSuccess; 1572 goto out_1; 1573 } 1574 1575 msg_slot->key = NULL; 1576 atomic_dec(&ch->n_to_notify); 1577 } 1578 xpc_free_msg_slot_uv(ch, msg_slot); 1579 out_1: 1580 xpc_msgqueue_deref(ch); 1581 return ret; 1582 } 1583 1584 /* 1585 * Tell the callers of xpc_send_notify() that the status of their payloads 1586 * is unknown because the channel is now disconnecting. 1587 * 1588 * We don't worry about putting these msg_slots on the free list since the 1589 * msg_slots themselves are about to be kfree'd. 1590 */ 1591 static void 1592 xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch) 1593 { 1594 struct xpc_send_msg_slot_uv *msg_slot; 1595 int entry; 1596 1597 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); 1598 1599 for (entry = 0; entry < ch->local_nentries; entry++) { 1600 1601 if (atomic_read(&ch->n_to_notify) == 0) 1602 break; 1603 1604 msg_slot = &ch->sn.uv.send_msg_slots[entry]; 1605 if (msg_slot->func != NULL) 1606 xpc_notify_sender_uv(ch, msg_slot, ch->reason); 1607 } 1608 } 1609 1610 /* 1611 * Get the next deliverable message's payload. 1612 */ 1613 static void * 1614 xpc_get_deliverable_payload_uv(struct xpc_channel *ch) 1615 { 1616 struct xpc_fifo_entry_uv *entry; 1617 struct xpc_notify_mq_msg_uv *msg; 1618 void *payload = NULL; 1619 1620 if (!(ch->flags & XPC_C_DISCONNECTING)) { 1621 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); 1622 if (entry != NULL) { 1623 msg = container_of(entry, struct xpc_notify_mq_msg_uv, 1624 hdr.u.next); 1625 payload = &msg->payload; 1626 } 1627 } 1628 return payload; 1629 } 1630 1631 static void 1632 xpc_received_payload_uv(struct xpc_channel *ch, void *payload) 1633 { 1634 struct xpc_notify_mq_msg_uv *msg; 1635 enum xp_retval ret; 1636 1637 msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); 1638 1639 /* return an ACK to the sender of this message */ 1640 1641 msg->hdr.partid = xp_partition_id; 1642 msg->hdr.size = 0; /* size of zero indicates this is an ACK */ 1643 1644 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, 1645 sizeof(struct xpc_notify_mq_msghdr_uv)); 1646 if (ret != xpSuccess) 1647 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1648 1649 msg->hdr.msg_slot_number += ch->remote_nentries; 1650 } 1651 1652 static struct xpc_arch_operations xpc_arch_ops_uv = { 1653 .setup_partitions = xpc_setup_partitions_uv, 1654 .teardown_partitions = xpc_teardown_partitions_uv, 1655 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, 1656 .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv, 1657 .setup_rsvd_page = xpc_setup_rsvd_page_uv, 1658 1659 .allow_hb = xpc_allow_hb_uv, 1660 .disallow_hb = xpc_disallow_hb_uv, 1661 .disallow_all_hbs = xpc_disallow_all_hbs_uv, 1662 .increment_heartbeat = xpc_increment_heartbeat_uv, 1663 .offline_heartbeat = xpc_offline_heartbeat_uv, 1664 .online_heartbeat = xpc_online_heartbeat_uv, 1665 .heartbeat_init = xpc_heartbeat_init_uv, 1666 .heartbeat_exit = xpc_heartbeat_exit_uv, 1667 .get_remote_heartbeat = xpc_get_remote_heartbeat_uv, 1668 1669 .request_partition_activation = 1670 xpc_request_partition_activation_uv, 1671 .request_partition_reactivation = 1672 xpc_request_partition_reactivation_uv, 1673 .request_partition_deactivation = 1674 xpc_request_partition_deactivation_uv, 1675 .cancel_partition_deactivation_request = 1676 xpc_cancel_partition_deactivation_request_uv, 1677 1678 .setup_ch_structures = xpc_setup_ch_structures_uv, 1679 .teardown_ch_structures = xpc_teardown_ch_structures_uv, 1680 1681 .make_first_contact = xpc_make_first_contact_uv, 1682 1683 .get_chctl_all_flags = xpc_get_chctl_all_flags_uv, 1684 .send_chctl_closerequest = xpc_send_chctl_closerequest_uv, 1685 .send_chctl_closereply = xpc_send_chctl_closereply_uv, 1686 .send_chctl_openrequest = xpc_send_chctl_openrequest_uv, 1687 .send_chctl_openreply = xpc_send_chctl_openreply_uv, 1688 .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv, 1689 .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv, 1690 1691 .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv, 1692 1693 .setup_msg_structures = xpc_setup_msg_structures_uv, 1694 .teardown_msg_structures = xpc_teardown_msg_structures_uv, 1695 1696 .indicate_partition_engaged = xpc_indicate_partition_engaged_uv, 1697 .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv, 1698 .assume_partition_disengaged = xpc_assume_partition_disengaged_uv, 1699 .partition_engaged = xpc_partition_engaged_uv, 1700 .any_partition_engaged = xpc_any_partition_engaged_uv, 1701 1702 .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv, 1703 .send_payload = xpc_send_payload_uv, 1704 .get_deliverable_payload = xpc_get_deliverable_payload_uv, 1705 .received_payload = xpc_received_payload_uv, 1706 .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, 1707 }; 1708 1709 int 1710 xpc_init_uv(void) 1711 { 1712 xpc_arch_ops = xpc_arch_ops_uv; 1713 1714 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { 1715 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", 1716 XPC_MSG_HDR_MAX_SIZE); 1717 return -E2BIG; 1718 } 1719 1720 xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 1721 XPC_ACTIVATE_IRQ_NAME, 1722 xpc_handle_activate_IRQ_uv); 1723 if (IS_ERR(xpc_activate_mq_uv)) 1724 return PTR_ERR(xpc_activate_mq_uv); 1725 1726 xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 1727 XPC_NOTIFY_IRQ_NAME, 1728 xpc_handle_notify_IRQ_uv); 1729 if (IS_ERR(xpc_notify_mq_uv)) { 1730 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1731 return PTR_ERR(xpc_notify_mq_uv); 1732 } 1733 1734 return 0; 1735 } 1736 1737 void 1738 xpc_exit_uv(void) 1739 { 1740 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); 1741 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1742 } 1743