/*
 * Copyright 2016-17 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/rcupdate.h>
#include <linux/cred.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
#include "vas.h"
#include "copy-paste.h"

#define CREATE_TRACE_POINTS
#include "vas-trace.h"

/*
 * Compute the paste address region for the window @window using the
 * ->paste_base_addr and ->paste_win_id_shift we got from device tree.
 */
static void compute_paste_address(struct vas_window *window, u64 *addr, int *len)
{
	int winid;
	u64 base, shift;

	base = window->vinst->paste_base_addr;
	shift = window->vinst->paste_win_id_shift;
	winid = window->winid;

	*addr = base + (winid << shift);
	if (len)
		*len = PAGE_SIZE;

	pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}

u64 vas_win_paste_addr(struct vas_window *win)
{
	u64 addr;

	compute_paste_address(win, &addr, NULL);

	return addr;
}
EXPORT_SYMBOL(vas_win_paste_addr);
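
/*
 * Illustrative sketch only (not part of this driver): a coprocessor
 * driver that exposes send windows to user space could use
 * vas_win_paste_addr() from its mmap() handler to map the (cache-able)
 * paste page of a window. The function name and the missing
 * ownership/permission checks are assumptions for this example, and a
 * build of this sketch would also need <linux/mm.h>.
 */
#if 0
static int example_mmap_paste(struct vas_window *txwin,
			      struct vm_area_struct *vma)
{
	u64 paste_addr = vas_win_paste_addr(txwin);

	/* One page of paste address space per window */
	return remap_pfn_range(vma, vma->vm_start, paste_addr >> PAGE_SHIFT,
			       PAGE_SIZE, vma->vm_page_prot);
}
#endif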

static inline void get_hvwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	u64 pbaddr;

	pbaddr = window->vinst->hvwc_bar_start;
	*start = pbaddr + window->winid * VAS_HVWC_SIZE;
	*len = VAS_HVWC_SIZE;
}

static inline void get_uwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	u64 pbaddr;

	pbaddr = window->vinst->uwc_bar_start;
	*start = pbaddr + window->winid * VAS_UWC_SIZE;
	*len = VAS_UWC_SIZE;
}

/*
 * Map the paste bus address of the given send window into kernel address
 * space. Unlike MMIO regions (map_mmio_region() below), the paste region
 * must be mapped cache-able and is only applicable to send windows.
 */
static void *map_paste_region(struct vas_window *txwin)
{
	int len;
	void *map;
	char *name;
	u64 start;

	name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
				txwin->winid);
	if (!name)
		goto free_name;

	txwin->paste_addr_name = name;
	compute_paste_address(txwin, &start, &len);

	if (!request_mem_region(start, len, name)) {
		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
				__func__, start, len);
		goto free_name;
	}

	map = ioremap_cache(start, len);
	if (!map) {
		pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__,
				start, len);
		goto free_name;
	}

	pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
	return map;

free_name:
	kfree(name);
	return ERR_PTR(-ENOMEM);
}

static void *map_mmio_region(char *name, u64 start, int len)
{
	void *map;

	if (!request_mem_region(start, len, name)) {
		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
				__func__, start, len);
		return NULL;
	}

	map = ioremap(start, len);
	if (!map) {
		pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start,
				len);
		return NULL;
	}

	return map;
}

static void unmap_region(void *addr, u64 start, int len)
{
	iounmap(addr);
	release_mem_region((phys_addr_t)start, len);
}

/*
 * Unmap the paste address region for a window.
 */
static void unmap_paste_region(struct vas_window *window)
{
	int len;
	u64 busaddr_start;

	if (window->paste_kaddr) {
		compute_paste_address(window, &busaddr_start, &len);
		unmap_region(window->paste_kaddr, busaddr_start, len);
		window->paste_kaddr = NULL;
		kfree(window->paste_addr_name);
		window->paste_addr_name = NULL;
	}
}

/*
 * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't
 * unmap when the window's debugfs dir is in use. This serializes close
 * of a window even on another VAS instance but, since it's not a critical
 * path, just minimize the time we hold the mutex for now. We can add
 * a per-instance mutex later if necessary.
 */
static void unmap_winctx_mmio_bars(struct vas_window *window)
{
	int len;
	void *uwc_map;
	void *hvwc_map;
	u64 busaddr_start;

	mutex_lock(&vas_mutex);

	hvwc_map = window->hvwc_map;
	window->hvwc_map = NULL;

	uwc_map = window->uwc_map;
	window->uwc_map = NULL;

	mutex_unlock(&vas_mutex);

	if (hvwc_map) {
		get_hvwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(hvwc_map, busaddr_start, len);
	}

	if (uwc_map) {
		get_uwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(uwc_map, busaddr_start, len);
	}
}

/*
 * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the
 * OS/User Window Context (UWC) MMIO Base Address Region for the given window.
 * Map these bus addresses and save the mapped kernel addresses in @window.
 */
int map_winctx_mmio_bars(struct vas_window *window)
{
	int len;
	u64 start;

	get_hvwc_mmio_bar(window, &start, &len);
	window->hvwc_map = map_mmio_region("HVWCM_Window", start, len);

	get_uwc_mmio_bar(window, &start, &len);
	window->uwc_map = map_mmio_region("UWCM_Window", start, len);

	if (!window->hvwc_map || !window->uwc_map) {
		unmap_winctx_mmio_bars(window);
		return -1;
	}

	return 0;
}

/*
 * Reset all valid registers in the HV and OS/User Window Contexts for
 * the window identified by @window.
 *
 * NOTE: We cannot really use a for loop to reset window context. Not all
 *	 offsets in a window context are valid registers and the valid
 *	 registers are not sequential. And, we can only write to offsets
 *	 with valid registers.
 */
void reset_window_regs(struct vas_window *window)
{
	write_hvwc_reg(window, VREG(LPID), 0ULL);
	write_hvwc_reg(window, VREG(PID), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(AMR), 0ULL);
	write_hvwc_reg(window, VREG(SEIDR), 0ULL);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL);
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(PSWID), 0ULL);
	write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL);
	write_hvwc_reg(window, VREG(WINCTL), 0ULL);
	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL);
	write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);

	/* Skip read-only registers: NX_UTIL and NX_UTIL_SE */

	/*
	 * The send and receive window credit adder registers are also
	 * accessible from HVWC and have been initialized above. We don't
	 * need to initialize from the OS/User Window Context, so skip
	 * the following calls:
	 *
	 *	write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	 *	write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	 */
}

/*
 * Initialize window context registers related to Address Translation.
 * These registers are common to send/receive windows although they
 * differ for user/kernel windows. As we resolve the TODOs we may
 * want to add fields to vas_winctx and move the initialization to
 * init_vas_winctx_regs().
 */
static void init_xlate_regs(struct vas_window *window, bool user_win)
{
	u64 lpcr, val;

	/*
	 * MSR_TA, MSR_US are false for both kernel and user.
	 * MSR_DR and MSR_PR are false for kernel.
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1);
	val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1);
	if (user_win) {
		val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1);
		val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1);
	}
	write_hvwc_reg(window, VREG(XLATE_MSR), val);

	lpcr = mfspr(SPRN_LPCR);
	val = 0ULL;
	/*
	 * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the
	 *	 Power ISA, v3.0B, the page size encoding is 0 = 4KB,
	 *	 5 = 64KB.
	 *
	 * NOTE: From Section 1.3.1, Address Translation Context of the
	 *	 Nest MMU Workbook, LPCR_SC should be 0 for Power9.
	 */
	val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5);
	val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL);
	val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC);
	val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0);
	write_hvwc_reg(window, VREG(XLATE_LPCR), val);

	/*
	 * Section 1.3.1 (Address Translation Context) of the NMMU workbook:
	 *	0b00	Hashed Page Table mode
	 *	0b01	Reserved
	 *	0b10	Radix on HPT
	 *	0b11	Radix on Radix
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2);
	write_hvwc_reg(window, VREG(XLATE_CTL), val);

	/*
	 * TODO: Can we mfspr(AMR) even for user windows?
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR));
	write_hvwc_reg(window, VREG(AMR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_SEIDR, val, 0);
	write_hvwc_reg(window, VREG(SEIDR), val);
}

/*
 * Initialize the Reserved Send Buffer Count for the send window. It
 * involves writing to the register, then reading it back to confirm that
 * the hardware has enough buffers to reserve. See section 1.3.1.2.1 of
 * the VAS workbook.
 *
 * Since we can only make a best-effort attempt to fulfill the request,
 * we don't return any errors if we cannot.
 *
 * TODO: Reserved (aka dedicated) send buffers are not supported yet.
 */
static void init_rsvd_tx_buf_count(struct vas_window *txwin,
				struct vas_winctx *winctx)
{
	write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
}

/*
 * init_winctx_regs()
 *	Initialize window context registers for a receive window.
 *	Except for caching control and marking window open, the registers
 *	are initialized in the order listed in Section 3.1.4 (Window Context
 *	Cache Register Details) of the VAS workbook although they don't need
 *	to be.
 *
 * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
 *	(so that it can get a large contiguous area) and passes that buffer
 *	to the kernel via the device tree. We now write that buffer address
 *	to the FIFO BAR. Would it make sense to do this all in OPAL? i.e.
 *	have OPAL write the per-chip RX FIFO addresses to the windows during
 *	boot-up as a one-time task? That could work for NX but what about
 *	other receivers? Let the receivers tell us the rx-fifo buffers
 *	for now.
 */
int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
{
	u64 val;
	int fifo_size;

	reset_window_regs(window);

	val = 0ULL;
	val = SET_FIELD(VAS_LPID, val, winctx->lpid);
	write_hvwc_reg(window, VREG(LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
	write_hvwc_reg(window, VREG(PID), val);

	init_xlate_regs(window, winctx->user_win);

	val = 0ULL;
	val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);

	/* In PowerNV, interrupts go to HV. */
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
	write_hvwc_reg(window, VREG(PSWID), val);

	write_hvwc_reg(window, VREG(SPARE1), 0ULL);
	write_hvwc_reg(window, VREG(SPARE2), 0ULL);
	write_hvwc_reg(window, VREG(SPARE3), 0ULL);

	/*
	 * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
	 *	 register as is - do NOT shift the address into VAS_LFIFO_BAR
	 *	 bit fields! Ok to set the page migration select fields -
	 *	 VAS ignores the lower 10+ bits in the address anyway, because
	 *	 the minimum FIFO size is 1K?
	 *
	 * See also: Design note in function header.
	 */
	val = __pa(winctx->rx_fifo);
	val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
	write_hvwc_reg(window, VREG(LFIFO_BAR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
	val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);

	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(LRX_WCRED), val);

	val = 0ULL;
	val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(TX_WCRED), val);

	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);

	fifo_size = winctx->rx_fifo_size / 1024;

	val = 0ULL;
	val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
	write_hvwc_reg(window, VREG(LFIFO_SIZE), val);

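	/*
	 * Example: a 32KB receive FIFO is encoded as ilog2(32768 / 1024) = 5
	 * in the VAS_LFIFO_SIZE field above.
	 */
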
	/*
	 * Update window control and caching control registers last so
	 * we mark the window open only after fully initializing it and
	 * pushing context to cache.
	 */

	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);

	init_rsvd_tx_buf_count(window, winctx);

	/* for a send window, point to the matching receive window */
	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);

	write_hvwc_reg(window, VREG(SPARE4), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
	val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
	val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
	val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
	val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);

	/* Skip read-only registers NX_UTIL and NX_UTIL_SE */

	write_hvwc_reg(window, VREG(SPARE5), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(SPARE6), 0ULL);

	/* Finally, push window context to memory and... */
	val = 0ULL;
	val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);

	/* ... mark the window open for business */
	val = 0ULL;
	val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
	val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
	val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
	val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
	val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
	write_hvwc_reg(window, VREG(WINCTL), val);

	return 0;
}

static DEFINE_SPINLOCK(vas_ida_lock);

static void vas_release_window_id(struct ida *ida, int winid)
{
	spin_lock(&vas_ida_lock);
	ida_remove(ida, winid);
	spin_unlock(&vas_ida_lock);
}

static int vas_assign_window_id(struct ida *ida)
{
	int rc, winid;

	do {
		rc = ida_pre_get(ida, GFP_KERNEL);
		if (!rc)
			return -EAGAIN;

		spin_lock(&vas_ida_lock);
		rc = ida_get_new(ida, &winid);
		spin_unlock(&vas_ida_lock);
	} while (rc == -EAGAIN);

	if (rc)
		return rc;

	if (winid > VAS_WINDOWS_PER_CHIP) {
		pr_err("Too many (%d) open windows\n", winid);
		vas_release_window_id(ida, winid);
		return -EAGAIN;
	}

	return winid;
}

static void vas_window_free(struct vas_window *window)
{
	int winid = window->winid;
	struct vas_instance *vinst = window->vinst;

	unmap_winctx_mmio_bars(window);

	vas_window_free_dbgdir(window);

	kfree(window);

	vas_release_window_id(&vinst->ida, winid);
}

static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
{
	int winid;
	struct vas_window *window;

	winid = vas_assign_window_id(&vinst->ida);
	if (winid < 0)
		return ERR_PTR(winid);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_free;

	window->vinst = vinst;
	window->winid = winid;

	if (map_winctx_mmio_bars(window))
		goto out_free;

	vas_window_init_dbgdir(window);

	return window;

out_free:
	kfree(window);
	vas_release_window_id(&vinst->ida, winid);
	return ERR_PTR(-ENOMEM);
}

static void put_rx_win(struct vas_window *rxwin)
{
	/* Better not be a send window! */
	WARN_ON_ONCE(rxwin->tx_win);

	atomic_dec(&rxwin->num_txwins);
}

/*
 * Find the user space receive window given the @pswid.
 *	- We must have a valid vasid and it must belong to this instance.
 *	  (so both send and receive windows are on the same VAS instance)
 *	- The window must refer to an OPEN, FTW, RECEIVE window.
 *
 * NOTE: We access the ->windows[] table and assume that vinst->mutex is held.
 */
static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
{
	int vasid, winid;
	struct vas_window *rxwin;

	decode_pswid(pswid, &vasid, &winid);

	if (vinst->vas_id != vasid)
		return ERR_PTR(-EINVAL);

	rxwin = vinst->windows[winid];

	if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
		return ERR_PTR(-EINVAL);

	return rxwin;
}

/*
 * Get the VAS receive window associated with the NX engine identified
 * by @cop and, if applicable, @pswid.
 *
 * See also the function header of set_vinst_win().
 */
static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
			enum vas_cop_type cop, u32 pswid)
{
	struct vas_window *rxwin;

	mutex_lock(&vinst->mutex);

	if (cop == VAS_COP_TYPE_FTW)
		rxwin = get_user_rxwin(vinst, pswid);
	else
		rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL);

	if (!IS_ERR(rxwin))
		atomic_inc(&rxwin->num_txwins);

	mutex_unlock(&vinst->mutex);

	return rxwin;
}

/*
 * We have two tables of windows in a VAS instance. The first one,
 * ->windows[], contains all the windows in the instance and allows
 * looking up a window by its id. It is used to look up send windows
 * during fault handling and receive windows when pairing user space
 * send/receive windows.
 *
 * The second table, ->rxwin[], contains receive windows that are
 * associated with NX engines. This table has VAS_COP_TYPE_MAX
 * entries and is used to look up a receive window by its
 * coprocessor type.
 *
 * Here, we save @window in the ->windows[] table. If it is a receive
 * window, we also save the window in the ->rxwin[] table.
 */
static void set_vinst_win(struct vas_instance *vinst,
			struct vas_window *window)
{
	int id = window->winid;

	mutex_lock(&vinst->mutex);

	/*
	 * There should only be one receive window for a coprocessor type
	 * unless it's a user (FTW) window.
	 */
	if (!window->user_win && !window->tx_win) {
		WARN_ON_ONCE(vinst->rxwin[window->cop]);
		vinst->rxwin[window->cop] = window;
	}

	WARN_ON_ONCE(vinst->windows[id] != NULL);
	vinst->windows[id] = window;

	mutex_unlock(&vinst->mutex);
}

/*
 * Clear this window from the table(s) of windows for this VAS instance.
 * See also the function header of set_vinst_win().
 */
static void clear_vinst_win(struct vas_window *window)
{
	int id = window->winid;
	struct vas_instance *vinst = window->vinst;

	mutex_lock(&vinst->mutex);

	if (!window->user_win && !window->tx_win) {
		WARN_ON_ONCE(!vinst->rxwin[window->cop]);
		vinst->rxwin[window->cop] = NULL;
	}

	WARN_ON_ONCE(vinst->windows[id] != window);
	vinst->windows[id] = NULL;

	mutex_unlock(&vinst->mutex);
}

static void init_winctx_for_rxwin(struct vas_window *rxwin,
			struct vas_rx_win_attr *rxattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero (memset()) all fields and only set non-zero fields.
	 * The following fields are 0/false but may deserve a comment:
	 *
	 *	->notify_os_intr_reg	In powerNV, send intrs to HV
	 *	->notify_disable	False for NX windows
	 *	->intr_disable		False for Fault Windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->rsvd_txbuf_count	NA for Rx windows
	 *	->lpid, ->pid, ->tid	NA for Rx windows
	 */

	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->rx_fifo = rxattr->rx_fifo;
	winctx->rx_fifo_size = rxattr->rx_fifo_size;
	winctx->wcreds_max = rxwin->wcreds_max;
	winctx->pin_win = rxattr->pin_win;

	winctx->nx_win = rxattr->nx_win;
	winctx->fault_win = rxattr->fault_win;
	winctx->user_win = rxattr->user_win;
	winctx->rej_no_credit = rxattr->rej_no_credit;
	winctx->rx_word_mode = rxattr->rx_win_ord_mode;
	winctx->tx_word_mode = rxattr->tx_win_ord_mode;
	winctx->rx_wcred_mode = rxattr->rx_wcred_mode;
	winctx->tx_wcred_mode = rxattr->tx_wcred_mode;
	winctx->notify_early = rxattr->notify_early;

	if (winctx->nx_win) {
		winctx->data_stamp = true;
		winctx->intr_disable = true;
		winctx->pin_win = true;

		WARN_ON_ONCE(winctx->fault_win);
		WARN_ON_ONCE(!winctx->rx_word_mode);
		WARN_ON_ONCE(!winctx->tx_word_mode);
		WARN_ON_ONCE(winctx->notify_after_count);
	} else if (winctx->fault_win) {
		winctx->notify_disable = true;
	} else if (winctx->user_win) {
		/*
		 * Section 1.8.1 Low Latency Core-Core Wake up of
		 * the VAS workbook:
		 *
		 *	- disable credit checks ([tr]x_wcred_mode = false)
		 *	- disable FIFO writes
		 *	- enable ASB_Notify, disable interrupt
		 */
		winctx->fifo_disable = true;
		winctx->intr_disable = true;
		winctx->rx_fifo = NULL;
	}

	winctx->lnotify_lpid = rxattr->lnotify_lpid;
	winctx->lnotify_pid = rxattr->lnotify_pid;
	winctx->lnotify_tid = rxattr->lnotify_tid;
	winctx->pswid = rxattr->pswid;
	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = rxattr->tc_mode;

	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
}

static bool rx_win_args_valid(enum vas_cop_type cop,
			struct vas_rx_win_attr *attr)
{
	pr_debug("Rxattr: fault %d, notify %d, intr %d, early %d, fifo %d\n",
			attr->fault_win, attr->notify_disable,
			attr->intr_disable, attr->notify_early,
			attr->rx_fifo_size);

	if (cop >= VAS_COP_TYPE_MAX)
		return false;

	if (cop != VAS_COP_TYPE_FTW &&
				attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN)
		return false;

	if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
		return false;

	if (attr->wcreds_max > VAS_RX_WCREDS_MAX)
		return false;

	if (attr->nx_win) {
		/* cannot be a fault or user window if it is an NX window */
		if (attr->fault_win || attr->user_win)
			return false;

		/*
		 * Section 3.1.4.32: NX Windows must not disable notification,
		 * and must not enable interrupts or early notification.
		 */
		if (attr->notify_disable || !attr->intr_disable ||
				attr->notify_early)
			return false;
	} else if (attr->fault_win) {
		/* cannot be both a fault and a user window */
		if (attr->user_win)
			return false;

		/*
		 * Section 3.1.4.32: Fault windows must disable notification
		 * but not interrupts.
		 */
		if (!attr->notify_disable || attr->intr_disable)
			return false;

	} else if (attr->user_win) {
		/*
		 * User receive windows are only for fast-thread-wakeup
		 * (FTW). They don't need a FIFO and must disable interrupts.
		 */
		if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable)
			return false;
	} else {
		/* An Rx window must be an NX, Fault or User window. */
		return false;
	}

	return true;
}

void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
{
	memset(rxattr, 0, sizeof(*rxattr));

	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
		rxattr->pin_win = true;
		rxattr->nx_win = true;
		rxattr->fault_win = false;
		rxattr->intr_disable = true;
		rxattr->rx_wcred_mode = true;
		rxattr->tx_wcred_mode = true;
		rxattr->rx_win_ord_mode = true;
		rxattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FAULT) {
		rxattr->pin_win = true;
		rxattr->fault_win = true;
		rxattr->notify_disable = true;
		rxattr->rx_wcred_mode = true;
		rxattr->tx_wcred_mode = true;
		rxattr->rx_win_ord_mode = true;
		rxattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FTW) {
		rxattr->user_win = true;
		rxattr->intr_disable = true;

		/*
		 * As noted in the VAS Workbook we disable credit checks.
		 * If we enable credit checks in the future, we must also
		 * implement a mechanism to return the user credits or new
		 * paste operations will fail.
		 */
	}
}
EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);

struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
			struct vas_rx_win_attr *rxattr)
{
	struct vas_window *rxwin;
	struct vas_winctx winctx;
	struct vas_instance *vinst;

	trace_vas_rx_win_open(current, vasid, cop, rxattr);

	if (!rx_win_args_valid(cop, rxattr))
		return ERR_PTR(-EINVAL);

	vinst = find_vas_instance(vasid);
	if (!vinst) {
		pr_devel("vasid %d not found!\n", vasid);
		return ERR_PTR(-EINVAL);
	}
	pr_devel("Found instance %d\n", vasid);

	rxwin = vas_window_alloc(vinst);
	if (IS_ERR(rxwin)) {
		pr_devel("Unable to allocate memory for Rx window\n");
		return rxwin;
	}

	rxwin->tx_win = false;
	rxwin->nx_win = rxattr->nx_win;
	rxwin->user_win = rxattr->user_win;
	rxwin->cop = cop;
	rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
	if (rxattr->user_win)
		rxwin->pid = task_pid_vnr(current);

	init_winctx_for_rxwin(rxwin, rxattr, &winctx);
	init_winctx_regs(rxwin, &winctx);

	set_vinst_win(vinst, rxwin);

	return rxwin;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
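
/*
 * Illustrative sketch only, not part of this driver: roughly how an NX
 * coprocessor driver might open its receive window. The FIFO buffer and
 * the lnotify lpid/pid/tid values (which normally come from firmware or
 * the coprocessor driver) are assumptions for the example.
 */
#if 0
static struct vas_window *example_open_nx_rxwin(int vasid, void *rx_fifo,
						int rx_fifo_size)
{
	struct vas_rx_win_attr rxattr;

	vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_842);

	rxattr.rx_fifo = rx_fifo;
	rxattr.rx_fifo_size = rx_fifo_size;
	rxattr.lnotify_lpid = 0;	/* host partition */
	rxattr.lnotify_pid = 0;		/* assumed; from the NX driver */
	rxattr.lnotify_tid = 0;		/* assumed; from the NX driver */

	return vas_rx_win_open(vasid, VAS_COP_TYPE_842, &rxattr);
}
#endif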

void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
{
	memset(txattr, 0, sizeof(*txattr));

	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
		txattr->rej_no_credit = false;
		txattr->rx_wcred_mode = true;
		txattr->tx_wcred_mode = true;
		txattr->rx_win_ord_mode = true;
		txattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FTW) {
		txattr->user_win = true;
	}
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);

static void init_winctx_for_txwin(struct vas_window *txwin,
			struct vas_tx_win_attr *txattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero all fields and only set non-zero ones. The following
	 * fields are set to 0/false for the stated reasons:
	 *
	 *	->notify_os_intr_reg	In powernv, send intrs to HV
	 *	->rsvd_txbuf_count	Not supported yet.
	 *	->notify_disable	False for NX windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->lnotify_lpid		NA for Tx windows
	 *	->lnotify_pid		NA for Tx windows
	 *	->lnotify_tid		NA for Tx windows
	 *	->tx_win_cred_mode	Ignore for now for NX windows
	 *	->rx_win_cred_mode	Ignore for now for NX windows
	 */
	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->wcreds_max = txwin->wcreds_max;

	winctx->user_win = txattr->user_win;
	winctx->nx_win = txwin->rxwin->nx_win;
	winctx->pin_win = txattr->pin_win;
	winctx->rej_no_credit = txattr->rej_no_credit;
	winctx->rsvd_txbuf_enable = txattr->rsvd_txbuf_enable;

	winctx->rx_wcred_mode = txattr->rx_wcred_mode;
	winctx->tx_wcred_mode = txattr->tx_wcred_mode;
	winctx->rx_word_mode = txattr->rx_win_ord_mode;
	winctx->tx_word_mode = txattr->tx_win_ord_mode;
	winctx->rsvd_txbuf_count = txattr->rsvd_txbuf_count;

	winctx->intr_disable = true;
	if (winctx->nx_win)
		winctx->data_stamp = true;

	winctx->lpid = txattr->lpid;
	winctx->pidr = txattr->pidr;
	winctx->rx_win_id = txwin->rxwin->winid;

	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = txattr->tc_mode;
	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;

	winctx->pswid = 0;
}

static bool tx_win_args_valid(enum vas_cop_type cop,
			struct vas_tx_win_attr *attr)
{
	if (attr->tc_mode != VAS_THRESH_DISABLED)
		return false;

	if (cop > VAS_COP_TYPE_MAX)
		return false;

	if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
		return false;

	if (attr->user_win &&
			(cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count))
		return false;

	return true;
}

struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
			struct vas_tx_win_attr *attr)
{
	int rc;
	struct vas_window *txwin;
	struct vas_window *rxwin;
	struct vas_winctx winctx;
	struct vas_instance *vinst;

	trace_vas_tx_win_open(current, vasid, cop, attr);

	if (!tx_win_args_valid(cop, attr))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller did not specify a vasid but specified the PSWID of a
	 * receive window (applicable only to FTW windows), use the vasid
	 * from that receive window.
	 */
	if (vasid == -1 && attr->pswid)
		decode_pswid(attr->pswid, &vasid, NULL);

	vinst = find_vas_instance(vasid);
	if (!vinst) {
		pr_devel("vasid %d not found!\n", vasid);
		return ERR_PTR(-EINVAL);
	}

	rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
	if (IS_ERR(rxwin)) {
		pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
		return rxwin;
	}

	txwin = vas_window_alloc(vinst);
	if (IS_ERR(txwin)) {
		rc = PTR_ERR(txwin);
		goto put_rxwin;
	}

	txwin->cop = cop;
	txwin->tx_win = 1;
	txwin->rxwin = rxwin;
	txwin->nx_win = txwin->rxwin->nx_win;
	txwin->pid = attr->pid;
	txwin->user_win = attr->user_win;
	txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;

	init_winctx_for_txwin(txwin, attr, &winctx);

	init_winctx_regs(txwin, &winctx);

	/*
	 * If it's a kernel send window, map the window address into the
	 * kernel's address space. For user windows, the user must issue an
	 * mmap() to map the window into their address space.
	 *
	 * NOTE: If the kernel ever resubmits a user CRB after handling a
	 *	 page fault, we will need to map this into the kernel as well.
	 */
	if (!txwin->user_win) {
		txwin->paste_kaddr = map_paste_region(txwin);
		if (IS_ERR(txwin->paste_kaddr)) {
			rc = PTR_ERR(txwin->paste_kaddr);
			goto free_window;
		}
	} else {
		/*
		 * A user mapping must ensure that context switch issues
		 * CP_ABORT for this thread.
		 */
		rc = set_thread_uses_vas();
		if (rc)
			goto free_window;
	}

	set_vinst_win(vinst, txwin);

	return txwin;

free_window:
	vas_window_free(txwin);

put_rxwin:
	put_rx_win(rxwin);
	return ERR_PTR(rc);

}
EXPORT_SYMBOL_GPL(vas_tx_win_open);
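
/*
 * Illustrative sketch only, not part of this driver: how an in-kernel
 * user might open a send window paired with an NX-842 receive window and
 * submit one CRB with vas_copy_crb()/vas_paste_crb(). The @crb buffer,
 * the function name and the lack of retry handling are assumptions.
 */
#if 0
static int example_submit_crb(int vasid, void *crb)
{
	struct vas_tx_win_attr txattr;
	struct vas_window *txwin;
	int rc;

	vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
	txattr.lpid = 0;	/* lpid is 0 for the kernel */
	txattr.pidr = 0;	/* pidr is 0 for the kernel */

	txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
	if (IS_ERR(txwin))
		return PTR_ERR(txwin);

	/* Copy the CRB into the thread's copy buffer, then paste it */
	vas_copy_crb(crb, 0);
	rc = vas_paste_crb(txwin, 0, true);

	vas_win_close(txwin);

	return rc;
}
#endif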

int vas_copy_crb(void *crb, int offset)
{
	return vas_copy(crb, offset);
}
EXPORT_SYMBOL_GPL(vas_copy_crb);

#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
{
	int rc;
	void *addr;
	uint64_t val;

	trace_vas_paste_crb(current, txwin);

	/*
	 * Only NX windows are supported for now and hardware assumes
	 * report-enable flag is set for NX windows. Ensure software
	 * complies too.
	 */
	WARN_ON_ONCE(txwin->nx_win && !re);

	addr = txwin->paste_kaddr;
	if (re) {
		/*
		 * Set the REPORT_ENABLE bit (equivalent to writing
		 * to 1K offset of the paste address)
		 */
		val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1);
		addr += val;
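		/*
		 * RMA_LSMP_REPORT_ENABLE is PPC_BIT(53), i.e. 1ULL << 10,
		 * so @addr is advanced by exactly 0x400 (1K) here.
		 */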
	}

	/*
	 * Map the raw CR value from vas_paste() to an error code (there
	 * is just pass or fail for now though).
	 */
	rc = vas_paste(addr, offset);
	if (rc == 2)
		rc = 0;
	else
		rc = -EINVAL;

	pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
			read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));

	return rc;
}
EXPORT_SYMBOL_GPL(vas_paste_crb);

/*
 * If credit checking is enabled for this window, poll for the return
 * of window credits (i.e. for NX engines to process any outstanding CRBs).
 * Since NX-842 waits for the CRBs to be processed before closing the
 * window, we should not have to wait for too long.
 *
 * TODO: We retry in 10ms intervals now. We could/should probably peek at
 *	the VAS_LRFIFO_PUSH_OFFSET register to get an estimate of pending
 *	CRBs on the FIFO and compute the delay dynamically on each retry.
 *	But that is not really needed until we support NX-GZIP access from
 *	user space. (The NX-842 driver waits for the CSB and Fast
 *	thread-wakeup doesn't use credit checking.)
 */
static void poll_window_credits(struct vas_window *window)
{
	u64 val;
	int creds, mode;

	val = read_hvwc_reg(window, VREG(WINCTL));
	if (window->tx_win)
		mode = GET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val);
	else
		mode = GET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val);

	if (!mode)
		return;
retry:
	if (window->tx_win) {
		val = read_hvwc_reg(window, VREG(TX_WCRED));
		creds = GET_FIELD(VAS_TX_WCRED, val);
	} else {
		val = read_hvwc_reg(window, VREG(LRX_WCRED));
		creds = GET_FIELD(VAS_LRX_WCRED, val);
	}

	if (creds < window->wcreds_max) {
		val = 0;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(10));
		goto retry;
	}
}

/*
 * Wait for the window to go to the "not-busy" state. It should only take
 * a short time to queue a CRB, so the window should not be busy for too
 * long. Try in 5ms intervals.
 */
static void poll_window_busy_state(struct vas_window *window)
{
	int busy;
	u64 val;

retry:
	val = read_hvwc_reg(window, VREG(WIN_STATUS));
	busy = GET_FIELD(VAS_WIN_BUSY, val);
	if (busy) {
		val = 0;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(5));
		goto retry;
	}
}

/*
 * Have the hardware cast a window out of cache and wait for it to
 * be completed.
 *
 * NOTE: It can take a relatively long time to cast the window context
 *	out of the cache. It is not strictly necessary to cast out if:
 *
 *	- we clear the "Pin Window" bit (so hardware is free to evict)
 *
 *	- we re-initialize the window context when it is reassigned.
 *
 *	We do the former in vas_win_close() and the latter in vas_win_open().
 *	So, ignoring the cast-out for now. We can add it as needed. If
 *	casting out becomes necessary we should consider offloading the
 *	job to a worker thread, so the window close can proceed quickly.
 */
static void poll_window_castout(struct vas_window *window)
{
	/* stub for now */
}

/*
 * Unpin and close a window so no new requests are accepted and the
 * hardware can evict this window from cache if necessary.
 */
static void unpin_close_window(struct vas_window *window)
{
	u64 val;

	val = read_hvwc_reg(window, VREG(WINCTL));
	val = SET_FIELD(VAS_WINCTL_PIN, val, 0);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 0);
	write_hvwc_reg(window, VREG(WINCTL), val);
}

/*
 * Close a window.
 *
 * See Section 1.12.1 of the VAS workbook v1.05 for details on closing
 * a window:
 *	- Disable new paste operations (unmap paste address)
 *	- Poll for the "Window Busy" bit to be cleared
 *	- Clear the Open/Enable bit for the Window.
 *	- Poll for return of window Credits (implies FIFO empty for Rx win?)
 *	- Unpin and cast window context out of cache
 *
 * Besides the hardware, the kernel has some bookkeeping of course.
 */
int vas_win_close(struct vas_window *window)
{
	if (!window)
		return 0;

	if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
		pr_devel("Attempting to close an active Rx window!\n");
		WARN_ON_ONCE(1);
		return -EBUSY;
	}

	unmap_paste_region(window);

	clear_vinst_win(window);

	poll_window_busy_state(window);

	unpin_close_window(window);

	poll_window_credits(window);

	poll_window_castout(window);

	/* if send window, drop reference to matching receive window */
	if (window->tx_win)
		put_rx_win(window->rxwin);

	vas_window_free(window);

	return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);

/*
 * Return a system-wide unique window id for the window @win.
 */
u32 vas_win_id(struct vas_window *win)
{
	return encode_pswid(win->vinst->vas_id, win->winid);
}
EXPORT_SYMBOL_GPL(vas_win_id);