1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2016-17 IBM Corp.
4  */
5 
6 #define pr_fmt(fmt) "vas: " fmt
7 
8 #include <linux/types.h>
9 #include <linux/mutex.h>
10 #include <linux/slab.h>
11 #include <linux/io.h>
12 #include <linux/log2.h>
13 #include <linux/rcupdate.h>
14 #include <linux/cred.h>
15 #include <linux/sched/mm.h>
16 #include <linux/mmu_context.h>
17 #include <asm/switch_to.h>
18 #include <asm/ppc-opcode.h>
19 #include "vas.h"
20 #include "copy-paste.h"
21 
22 #define CREATE_TRACE_POINTS
23 #include "vas-trace.h"
24 
25 /*
 * Compute the paste address region for the window @window using the
 * ->paste_base_addr and ->paste_win_id_shift we got from the device tree.
28  */
29 void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
30 {
31 	int winid;
32 	u64 base, shift;
33 
34 	base = window->vinst->paste_base_addr;
35 	shift = window->vinst->paste_win_id_shift;
36 	winid = window->winid;
37 
38 	*addr  = base + (winid << shift);
39 	if (len)
40 		*len = PAGE_SIZE;
41 
42 	pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
43 }
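
/*
 * For illustration only: with a hypothetical paste_win_id_shift of 16,
 * window 5 would paste at paste_base_addr + (5 << 16), i.e. each window
 * gets its own naturally aligned paste region. The real base and shift
 * come from the device tree, so treat these numbers as an example of the
 * arithmetic above, not as the actual hardware layout.
 */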
44 
45 static inline void get_hvwc_mmio_bar(struct vas_window *window,
46 			u64 *start, int *len)
47 {
48 	u64 pbaddr;
49 
50 	pbaddr = window->vinst->hvwc_bar_start;
51 	*start = pbaddr + window->winid * VAS_HVWC_SIZE;
52 	*len = VAS_HVWC_SIZE;
53 }
54 
55 static inline void get_uwc_mmio_bar(struct vas_window *window,
56 			u64 *start, int *len)
57 {
58 	u64 pbaddr;
59 
60 	pbaddr = window->vinst->uwc_bar_start;
61 	*start = pbaddr + window->winid * VAS_UWC_SIZE;
62 	*len = VAS_UWC_SIZE;
63 }
64 
65 /*
66  * Map the paste bus address of the given send window into kernel address
 * space. Unlike the MMIO regions (map_mmio_region() below), the paste
 * region must be mapped cacheable and applies only to send windows.
69  */
70 static void *map_paste_region(struct vas_window *txwin)
71 {
72 	int len;
73 	void *map;
74 	char *name;
75 	u64 start;
76 
77 	name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
78 				txwin->winid);
79 	if (!name)
80 		goto free_name;
81 
82 	txwin->paste_addr_name = name;
83 	vas_win_paste_addr(txwin, &start, &len);
84 
85 	if (!request_mem_region(start, len, name)) {
86 		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
87 				__func__, start, len);
88 		goto free_name;
89 	}
90 
	map = ioremap_cache(start, len);
	if (!map) {
		pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__,
				start, len);
		goto free_region;
	}

	pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
	return map;

free_region:
	release_mem_region(start, len);
free_name:
	kfree(name);
	return ERR_PTR(-ENOMEM);
104 }
105 
106 static void *map_mmio_region(char *name, u64 start, int len)
107 {
108 	void *map;
109 
110 	if (!request_mem_region(start, len, name)) {
111 		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
112 				__func__, start, len);
113 		return NULL;
114 	}
115 
	map = ioremap(start, len);
	if (!map) {
		pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start,
				len);
		release_mem_region(start, len);
		return NULL;
	}
122 
123 	return map;
124 }
125 
126 static void unmap_region(void *addr, u64 start, int len)
127 {
128 	iounmap(addr);
129 	release_mem_region((phys_addr_t)start, len);
130 }
131 
132 /*
133  * Unmap the paste address region for a window.
134  */
135 static void unmap_paste_region(struct vas_window *window)
136 {
137 	int len;
138 	u64 busaddr_start;
139 
140 	if (window->paste_kaddr) {
141 		vas_win_paste_addr(window, &busaddr_start, &len);
142 		unmap_region(window->paste_kaddr, busaddr_start, len);
143 		window->paste_kaddr = NULL;
144 		kfree(window->paste_addr_name);
145 		window->paste_addr_name = NULL;
146 	}
147 }
148 
149 /*
 * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't
 * unmap while the window's debugfs dir is in use. This serializes the
 * close of a window even across VAS instances, but since this is not a
 * critical path, just minimize the time we hold the mutex for now. We
 * can add a per-instance mutex later if necessary.
155  */
156 static void unmap_winctx_mmio_bars(struct vas_window *window)
157 {
158 	int len;
159 	void *uwc_map;
160 	void *hvwc_map;
161 	u64 busaddr_start;
162 
163 	mutex_lock(&vas_mutex);
164 
165 	hvwc_map = window->hvwc_map;
166 	window->hvwc_map = NULL;
167 
168 	uwc_map = window->uwc_map;
169 	window->uwc_map = NULL;
170 
171 	mutex_unlock(&vas_mutex);
172 
173 	if (hvwc_map) {
174 		get_hvwc_mmio_bar(window, &busaddr_start, &len);
175 		unmap_region(hvwc_map, busaddr_start, len);
176 	}
177 
178 	if (uwc_map) {
179 		get_uwc_mmio_bar(window, &busaddr_start, &len);
180 		unmap_region(uwc_map, busaddr_start, len);
181 	}
182 }
183 
184 /*
185  * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the
186  * OS/User Window Context (UWC) MMIO Base Address Region for the given window.
187  * Map these bus addresses and save the mapped kernel addresses in @window.
188  */
189 static int map_winctx_mmio_bars(struct vas_window *window)
190 {
191 	int len;
192 	u64 start;
193 
194 	get_hvwc_mmio_bar(window, &start, &len);
195 	window->hvwc_map = map_mmio_region("HVWCM_Window", start, len);
196 
197 	get_uwc_mmio_bar(window, &start, &len);
198 	window->uwc_map = map_mmio_region("UWCM_Window", start, len);
199 
200 	if (!window->hvwc_map || !window->uwc_map) {
201 		unmap_winctx_mmio_bars(window);
		return -ENOMEM;
203 	}
204 
205 	return 0;
206 }
207 
208 /*
209  * Reset all valid registers in the HV and OS/User Window Contexts for
210  * the window identified by @window.
211  *
212  * NOTE: We cannot really use a for loop to reset window context. Not all
213  *	 offsets in a window context are valid registers and the valid
214  *	 registers are not sequential. And, we can only write to offsets
215  *	 with valid registers.
216  */
217 static void reset_window_regs(struct vas_window *window)
218 {
219 	write_hvwc_reg(window, VREG(LPID), 0ULL);
220 	write_hvwc_reg(window, VREG(PID), 0ULL);
221 	write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL);
222 	write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL);
223 	write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL);
224 	write_hvwc_reg(window, VREG(AMR), 0ULL);
225 	write_hvwc_reg(window, VREG(SEIDR), 0ULL);
226 	write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL);
227 	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
228 	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL);
229 	write_hvwc_reg(window, VREG(PSWID), 0ULL);
230 	write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL);
231 	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL);
232 	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL);
233 	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
234 	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
235 	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
236 	write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL);
237 	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
238 	write_hvwc_reg(window, VREG(TX_WCRED), 0ULL);
239 	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
240 	write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL);
241 	write_hvwc_reg(window, VREG(WINCTL), 0ULL);
242 	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
243 	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL);
244 	write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL);
245 	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL);
246 	write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL);
247 	write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL);
248 	write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL);
249 	write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL);
250 	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL);
251 	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
252 
253 	/* Skip read-only registers: NX_UTIL and NX_UTIL_SE */
254 
255 	/*
256 	 * The send and receive window credit adder registers are also
257 	 * accessible from HVWC and have been initialized above. We don't
	 * need to initialize them from the OS/User Window Context, so skip
	 * the following calls:
260 	 *
261 	 *	write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
262 	 *	write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
263 	 */
264 }
265 
266 /*
267  * Initialize window context registers related to Address Translation.
268  * These registers are common to send/receive windows although they
269  * differ for user/kernel windows. As we resolve the TODOs we may
270  * want to add fields to vas_winctx and move the initialization to
271  * init_vas_winctx_regs().
272  */
273 static void init_xlate_regs(struct vas_window *window, bool user_win)
274 {
275 	u64 lpcr, val;
276 
277 	/*
278 	 * MSR_TA, MSR_US are false for both kernel and user.
279 	 * MSR_DR and MSR_PR are false for kernel.
280 	 */
281 	val = 0ULL;
282 	val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1);
283 	val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1);
284 	if (user_win) {
285 		val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1);
286 		val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1);
287 	}
288 	write_hvwc_reg(window, VREG(XLATE_MSR), val);
289 
290 	lpcr = mfspr(SPRN_LPCR);
291 	val = 0ULL;
292 	/*
293 	 * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the
294 	 *	 Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB.
295 	 *
296 	 * NOTE: From Section 1.3.1, Address Translation Context of the
297 	 *	 Nest MMU Workbook, LPCR_SC should be 0 for Power9.
298 	 */
299 	val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5);
300 	val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL);
301 	val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC);
302 	val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0);
303 	write_hvwc_reg(window, VREG(XLATE_LPCR), val);
304 
305 	/*
306 	 * Section 1.3.1 (Address translation Context) of NMMU workbook.
307 	 *	0b00	Hashed Page Table mode
308 	 *	0b01	Reserved
309 	 *	0b10	Radix on HPT
310 	 *	0b11	Radix on Radix
311 	 */
312 	val = 0ULL;
313 	val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2);
314 	write_hvwc_reg(window, VREG(XLATE_CTL), val);
315 
316 	/*
317 	 * TODO: Can we mfspr(AMR) even for user windows?
318 	 */
319 	val = 0ULL;
320 	val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR));
321 	write_hvwc_reg(window, VREG(AMR), val);
322 
323 	val = 0ULL;
324 	val = SET_FIELD(VAS_SEIDR, val, 0);
325 	write_hvwc_reg(window, VREG(SEIDR), val);
326 }
327 
328 /*
 * Initialize the Reserved Send Buffer Count for the send window. It
 * involves writing the requested count to the register and reading it
 * back to confirm that the hardware has enough buffers to reserve. See
 * section 1.3.1.2.1 of the VAS workbook.
332  *
333  * Since we can only make a best-effort attempt to fulfill the request,
334  * we don't return any errors if we cannot.
335  *
336  * TODO: Reserved (aka dedicated) send buffers are not supported yet.
337  */
338 static void init_rsvd_tx_buf_count(struct vas_window *txwin,
339 				struct vas_winctx *winctx)
340 {
341 	write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
342 }
343 
344 /*
345  * init_winctx_regs()
 *	Initialize window context registers for a send or receive window.
347  *	Except for caching control and marking window open, the registers
348  *	are initialized in the order listed in Section 3.1.4 (Window Context
349  *	Cache Register Details) of the VAS workbook although they don't need
350  *	to be.
351  *
352  * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
353  *	(so that it can get a large contiguous area) and passes that buffer
354  *	to kernel via device tree. We now write that buffer address to the
355  *	FIFO BAR. Would it make sense to do this all in OPAL? i.e have OPAL
356  *	write the per-chip RX FIFO addresses to the windows during boot-up
357  *	as a one-time task? That could work for NX but what about other
358  *	receivers?  Let the receivers tell us the rx-fifo buffers for now.
359  */
360 static void init_winctx_regs(struct vas_window *window,
361 			     struct vas_winctx *winctx)
362 {
363 	u64 val;
364 	int fifo_size;
365 
366 	reset_window_regs(window);
367 
368 	val = 0ULL;
369 	val = SET_FIELD(VAS_LPID, val, winctx->lpid);
370 	write_hvwc_reg(window, VREG(LPID), val);
371 
372 	val = 0ULL;
373 	val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
374 	write_hvwc_reg(window, VREG(PID), val);
375 
376 	init_xlate_regs(window, winctx->user_win);
377 
378 	val = 0ULL;
379 	val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id);
380 	write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);
381 
382 	/* In PowerNV, interrupts go to HV. */
383 	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
384 
385 	val = 0ULL;
386 	val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
387 	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);
388 
389 	val = 0ULL;
390 	val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
391 	write_hvwc_reg(window, VREG(PSWID), val);
392 
393 	write_hvwc_reg(window, VREG(SPARE1), 0ULL);
394 	write_hvwc_reg(window, VREG(SPARE2), 0ULL);
395 	write_hvwc_reg(window, VREG(SPARE3), 0ULL);
396 
397 	/*
398 	 * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
399 	 *	 register as is - do NOT shift the address into VAS_LFIFO_BAR
	 *	 bit fields! It is OK to set the page migration select field -
	 *	 VAS ignores the lower 10+ bits in the address anyway,
	 *	 presumably because the minimum FIFO size is 1KB.
403 	 *
404 	 * See also: Design note in function header.
405 	 */
406 	val = __pa(winctx->rx_fifo);
407 	val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
408 	write_hvwc_reg(window, VREG(LFIFO_BAR), val);
409 
410 	val = 0ULL;
411 	val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
412 	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);
413 
414 	val = 0ULL;
415 	val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
416 	val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
417 	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);
418 
419 	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
420 	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
421 	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
422 
423 	val = 0ULL;
424 	val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
425 	write_hvwc_reg(window, VREG(LRX_WCRED), val);
426 
427 	val = 0ULL;
428 	val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
429 	write_hvwc_reg(window, VREG(TX_WCRED), val);
430 
431 	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
432 	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
433 
434 	fifo_size = winctx->rx_fifo_size / 1024;
435 
436 	val = 0ULL;
437 	val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
438 	write_hvwc_reg(window, VREG(LFIFO_SIZE), val);
439 
	/*
	 * Update window control and caching control registers last so
	 * we mark the window open only after fully initializing it and
	 * pushing context to cache.
	 */
444 
445 	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
446 
447 	init_rsvd_tx_buf_count(window, winctx);
448 
449 	/* for a send window, point to the matching receive window */
450 	val = 0ULL;
451 	val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
452 	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);
453 
454 	write_hvwc_reg(window, VREG(SPARE4), 0ULL);
455 
456 	val = 0ULL;
457 	val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
458 	val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
459 	val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
460 	val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
461 	write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);
462 
463 	val = 0ULL;
464 	val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
465 	write_hvwc_reg(window, VREG(LNOTIFY_PID), val);
466 
467 	val = 0ULL;
468 	val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
469 	write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);
470 
471 	val = 0ULL;
472 	val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
473 	write_hvwc_reg(window, VREG(LNOTIFY_TID), val);
474 
475 	val = 0ULL;
476 	val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
477 	val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
478 	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);
479 
480 	/* Skip read-only registers NX_UTIL and NX_UTIL_SE */
481 
482 	write_hvwc_reg(window, VREG(SPARE5), 0ULL);
483 	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
484 	write_hvwc_reg(window, VREG(SPARE6), 0ULL);
485 
486 	/* Finally, push window context to memory and... */
487 	val = 0ULL;
488 	val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
489 	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);
490 
491 	/* ... mark the window open for business */
492 	val = 0ULL;
493 	val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
494 	val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
495 	val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
496 	val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
497 	val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
498 	val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
499 	val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
500 	val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
501 	val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
502 	write_hvwc_reg(window, VREG(WINCTL), val);
503 }
504 
505 static void vas_release_window_id(struct ida *ida, int winid)
506 {
507 	ida_free(ida, winid);
508 }
509 
510 static int vas_assign_window_id(struct ida *ida)
511 {
512 	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
513 
514 	if (winid == -ENOSPC) {
515 		pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
516 		return -EAGAIN;
517 	}
518 
519 	return winid;
520 }
521 
522 static void vas_window_free(struct vas_window *window)
523 {
524 	int winid = window->winid;
525 	struct vas_instance *vinst = window->vinst;
526 
527 	unmap_winctx_mmio_bars(window);
528 
529 	vas_window_free_dbgdir(window);
530 
531 	kfree(window);
532 
533 	vas_release_window_id(&vinst->ida, winid);
534 }
535 
536 static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
537 {
538 	int winid;
539 	struct vas_window *window;
540 
541 	winid = vas_assign_window_id(&vinst->ida);
542 	if (winid < 0)
543 		return ERR_PTR(winid);
544 
545 	window = kzalloc(sizeof(*window), GFP_KERNEL);
546 	if (!window)
547 		goto out_free;
548 
549 	window->vinst = vinst;
550 	window->winid = winid;
551 
552 	if (map_winctx_mmio_bars(window))
553 		goto out_free;
554 
555 	vas_window_init_dbgdir(window);
556 
557 	return window;
558 
559 out_free:
560 	kfree(window);
561 	vas_release_window_id(&vinst->ida, winid);
562 	return ERR_PTR(-ENOMEM);
563 }
564 
565 static void put_rx_win(struct vas_window *rxwin)
566 {
567 	/* Better not be a send window! */
568 	WARN_ON_ONCE(rxwin->tx_win);
569 
570 	atomic_dec(&rxwin->num_txwins);
571 }
572 
573 /*
574  * Find the user space receive window given the @pswid.
575  *      - We must have a valid vasid and it must belong to this instance.
576  *        (so both send and receive windows are on the same VAS instance)
577  *      - The window must refer to an OPEN, FTW, RECEIVE window.
578  *
579  * NOTE: We access ->windows[] table and assume that vinst->mutex is held.
580  */
581 static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
582 {
583 	int vasid, winid;
584 	struct vas_window *rxwin;
585 
586 	decode_pswid(pswid, &vasid, &winid);
587 
588 	if (vinst->vas_id != vasid)
589 		return ERR_PTR(-EINVAL);
590 
591 	rxwin = vinst->windows[winid];
592 
593 	if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
594 		return ERR_PTR(-EINVAL);
595 
596 	return rxwin;
597 }
598 
599 /*
600  * Get the VAS receive window associated with NX engine identified
601  * by @cop and if applicable, @pswid.
602  *
603  * See also function header of set_vinst_win().
604  */
605 static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
606 			enum vas_cop_type cop, u32 pswid)
607 {
608 	struct vas_window *rxwin;
609 
610 	mutex_lock(&vinst->mutex);
611 
612 	if (cop == VAS_COP_TYPE_FTW)
613 		rxwin = get_user_rxwin(vinst, pswid);
614 	else
615 		rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL);
616 
617 	if (!IS_ERR(rxwin))
618 		atomic_inc(&rxwin->num_txwins);
619 
620 	mutex_unlock(&vinst->mutex);
621 
622 	return rxwin;
623 }
624 
625 /*
626  * We have two tables of windows in a VAS instance. The first one,
627  * ->windows[], contains all the windows in the instance and allows
628  * looking up a window by its id. It is used to look up send windows
629  * during fault handling and receive windows when pairing user space
630  * send/receive windows.
631  *
632  * The second table, ->rxwin[], contains receive windows that are
633  * associated with NX engines. This table has VAS_COP_TYPE_MAX
634  * entries and is used to look up a receive window by its
635  * coprocessor type.
636  *
637  * Here, we save @window in the ->windows[] table. If it is a receive
638  * window, we also save the window in the ->rxwin[] table.
639  */
640 static void set_vinst_win(struct vas_instance *vinst,
641 			struct vas_window *window)
642 {
643 	int id = window->winid;
644 
645 	mutex_lock(&vinst->mutex);
646 
647 	/*
648 	 * There should only be one receive window for a coprocessor type
	 * unless it is a user (FTW) window.
650 	 */
651 	if (!window->user_win && !window->tx_win) {
652 		WARN_ON_ONCE(vinst->rxwin[window->cop]);
653 		vinst->rxwin[window->cop] = window;
654 	}
655 
656 	WARN_ON_ONCE(vinst->windows[id] != NULL);
657 	vinst->windows[id] = window;
658 
659 	mutex_unlock(&vinst->mutex);
660 }
661 
662 /*
663  * Clear this window from the table(s) of windows for this VAS instance.
664  * See also function header of set_vinst_win().
665  */
666 static void clear_vinst_win(struct vas_window *window)
667 {
668 	int id = window->winid;
669 	struct vas_instance *vinst = window->vinst;
670 
671 	mutex_lock(&vinst->mutex);
672 
673 	if (!window->user_win && !window->tx_win) {
674 		WARN_ON_ONCE(!vinst->rxwin[window->cop]);
675 		vinst->rxwin[window->cop] = NULL;
676 	}
677 
678 	WARN_ON_ONCE(vinst->windows[id] != window);
679 	vinst->windows[id] = NULL;
680 
681 	mutex_unlock(&vinst->mutex);
682 }
683 
684 static void init_winctx_for_rxwin(struct vas_window *rxwin,
685 			struct vas_rx_win_attr *rxattr,
686 			struct vas_winctx *winctx)
687 {
688 	/*
689 	 * We first zero (memset()) all fields and only set non-zero fields.
	 * The following fields are 0/false but may deserve a comment:
	 *
	 *	->notify_os_intr_reg	In PowerNV, send intrs to HV
693 	 *	->notify_disable	False for NX windows
694 	 *	->intr_disable		False for Fault Windows
695 	 *	->xtra_write		False for NX windows
696 	 *	->notify_early		NA for NX windows
697 	 *	->rsvd_txbuf_count	NA for Rx windows
698 	 *	->lpid, ->pid, ->tid	NA for Rx windows
699 	 */
700 
701 	memset(winctx, 0, sizeof(struct vas_winctx));
702 
703 	winctx->rx_fifo = rxattr->rx_fifo;
704 	winctx->rx_fifo_size = rxattr->rx_fifo_size;
705 	winctx->wcreds_max = rxwin->wcreds_max;
706 	winctx->pin_win = rxattr->pin_win;
707 
708 	winctx->nx_win = rxattr->nx_win;
709 	winctx->fault_win = rxattr->fault_win;
710 	winctx->user_win = rxattr->user_win;
711 	winctx->rej_no_credit = rxattr->rej_no_credit;
712 	winctx->rx_word_mode = rxattr->rx_win_ord_mode;
713 	winctx->tx_word_mode = rxattr->tx_win_ord_mode;
714 	winctx->rx_wcred_mode = rxattr->rx_wcred_mode;
715 	winctx->tx_wcred_mode = rxattr->tx_wcred_mode;
716 	winctx->notify_early = rxattr->notify_early;
717 
718 	if (winctx->nx_win) {
719 		winctx->data_stamp = true;
720 		winctx->intr_disable = true;
721 		winctx->pin_win = true;
722 
723 		WARN_ON_ONCE(winctx->fault_win);
724 		WARN_ON_ONCE(!winctx->rx_word_mode);
725 		WARN_ON_ONCE(!winctx->tx_word_mode);
726 		WARN_ON_ONCE(winctx->notify_after_count);
727 	} else if (winctx->fault_win) {
728 		winctx->notify_disable = true;
729 	} else if (winctx->user_win) {
730 		/*
731 		 * Section 1.8.1 Low Latency Core-Core Wake up of
732 		 * the VAS workbook:
733 		 *
734 		 *      - disable credit checks ([tr]x_wcred_mode = false)
735 		 *      - disable FIFO writes
736 		 *      - enable ASB_Notify, disable interrupt
737 		 */
738 		winctx->fifo_disable = true;
739 		winctx->intr_disable = true;
740 		winctx->rx_fifo = NULL;
741 	}
742 
743 	winctx->lnotify_lpid = rxattr->lnotify_lpid;
744 	winctx->lnotify_pid = rxattr->lnotify_pid;
745 	winctx->lnotify_tid = rxattr->lnotify_tid;
746 	winctx->pswid = rxattr->pswid;
747 	winctx->dma_type = VAS_DMA_TYPE_INJECT;
748 	winctx->tc_mode = rxattr->tc_mode;
749 
750 	winctx->min_scope = VAS_SCOPE_LOCAL;
751 	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
752 	if (rxwin->vinst->virq)
753 		winctx->irq_port = rxwin->vinst->irq_port;
754 }
755 
756 static bool rx_win_args_valid(enum vas_cop_type cop,
757 			struct vas_rx_win_attr *attr)
758 {
759 	pr_debug("Rxattr: fault %d, notify %d, intr %d, early %d, fifo %d\n",
760 			attr->fault_win, attr->notify_disable,
761 			attr->intr_disable, attr->notify_early,
762 			attr->rx_fifo_size);
763 
764 	if (cop >= VAS_COP_TYPE_MAX)
765 		return false;
766 
767 	if (cop != VAS_COP_TYPE_FTW &&
768 				attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN)
769 		return false;
770 
771 	if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
772 		return false;
773 
774 	if (!attr->wcreds_max)
775 		return false;
776 
777 	if (attr->nx_win) {
778 		/* cannot be fault or user window if it is nx */
779 		if (attr->fault_win || attr->user_win)
780 			return false;
781 		/*
782 		 * Section 3.1.4.32: NX Windows must not disable notification,
783 		 *	and must not enable interrupts or early notification.
784 		 */
785 		if (attr->notify_disable || !attr->intr_disable ||
786 				attr->notify_early)
787 			return false;
788 	} else if (attr->fault_win) {
789 		/* cannot be both fault and user window */
790 		if (attr->user_win)
791 			return false;
792 
793 		/*
794 		 * Section 3.1.4.32: Fault windows must disable notification
795 		 *	but not interrupts.
796 		 */
797 		if (!attr->notify_disable || attr->intr_disable)
798 			return false;
799 
800 	} else if (attr->user_win) {
801 		/*
802 		 * User receive windows are only for fast-thread-wakeup
		 * (FTW). They don't need a FIFO and must disable interrupts.
804 		 */
805 		if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable)
806 			return false;
807 	} else {
		/* Rx window must be an NX, Fault or User (FTW) window. */
809 		return false;
810 	}
811 
812 	return true;
813 }
814 
815 void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
816 {
817 	memset(rxattr, 0, sizeof(*rxattr));
818 
819 	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
820 		cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
821 		rxattr->pin_win = true;
822 		rxattr->nx_win = true;
823 		rxattr->fault_win = false;
824 		rxattr->intr_disable = true;
825 		rxattr->rx_wcred_mode = true;
826 		rxattr->tx_wcred_mode = true;
827 		rxattr->rx_win_ord_mode = true;
828 		rxattr->tx_win_ord_mode = true;
829 	} else if (cop == VAS_COP_TYPE_FAULT) {
830 		rxattr->pin_win = true;
831 		rxattr->fault_win = true;
832 		rxattr->notify_disable = true;
833 		rxattr->rx_wcred_mode = true;
834 		rxattr->rx_win_ord_mode = true;
835 		rxattr->rej_no_credit = true;
836 		rxattr->tc_mode = VAS_THRESH_DISABLED;
837 	} else if (cop == VAS_COP_TYPE_FTW) {
838 		rxattr->user_win = true;
839 		rxattr->intr_disable = true;
840 
841 		/*
842 		 * As noted in the VAS Workbook we disable credit checks.
843 		 * If we enable credit checks in the future, we must also
844 		 * implement a mechanism to return the user credits or new
845 		 * paste operations will fail.
846 		 */
847 	}
848 }
849 EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
850 
851 struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
852 			struct vas_rx_win_attr *rxattr)
853 {
854 	struct vas_window *rxwin;
855 	struct vas_winctx winctx;
856 	struct vas_instance *vinst;
857 
858 	trace_vas_rx_win_open(current, vasid, cop, rxattr);
859 
860 	if (!rx_win_args_valid(cop, rxattr))
861 		return ERR_PTR(-EINVAL);
862 
863 	vinst = find_vas_instance(vasid);
864 	if (!vinst) {
865 		pr_devel("vasid %d not found!\n", vasid);
866 		return ERR_PTR(-EINVAL);
867 	}
868 	pr_devel("Found instance %d\n", vasid);
869 
870 	rxwin = vas_window_alloc(vinst);
871 	if (IS_ERR(rxwin)) {
872 		pr_devel("Unable to allocate memory for Rx window\n");
873 		return rxwin;
874 	}
875 
876 	rxwin->tx_win = false;
877 	rxwin->nx_win = rxattr->nx_win;
878 	rxwin->user_win = rxattr->user_win;
879 	rxwin->cop = cop;
880 	rxwin->wcreds_max = rxattr->wcreds_max;
881 
882 	init_winctx_for_rxwin(rxwin, rxattr, &winctx);
883 	init_winctx_regs(rxwin, &winctx);
884 
885 	set_vinst_win(vinst, rxwin);
886 
887 	return rxwin;
888 }
889 EXPORT_SYMBOL_GPL(vas_rx_win_open);
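
/*
 * Sketch of how a kernel receiver (NX-842 style) might set up a receive
 * window with the two helpers above. The FIFO buffer, size, credits and
 * thread IDs below are placeholders, not the values any particular
 * driver uses:
 *
 *	struct vas_rx_win_attr rxattr;
 *	struct vas_window *rxwin;
 *
 *	vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_842);
 *	rxattr.rx_fifo = rx_fifo;		// kernel buffer for CRBs
 *	rxattr.rx_fifo_size = rx_fifo_size;	// >= VAS_RX_FIFO_SIZE_MIN
 *	rxattr.wcreds_max = wcreds_max;		// must be non-zero
 *	rxattr.lnotify_lpid = lpid;
 *	rxattr.lnotify_pid = pid;
 *	rxattr.lnotify_tid = tid;
 *
 *	rxwin = vas_rx_win_open(vasid, VAS_COP_TYPE_842, &rxattr);
 *	if (IS_ERR(rxwin))
 *		return PTR_ERR(rxwin);
 */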
890 
891 void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
892 {
893 	memset(txattr, 0, sizeof(*txattr));
894 
895 	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
896 		cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
897 		txattr->rej_no_credit = false;
898 		txattr->rx_wcred_mode = true;
899 		txattr->tx_wcred_mode = true;
900 		txattr->rx_win_ord_mode = true;
901 		txattr->tx_win_ord_mode = true;
902 	} else if (cop == VAS_COP_TYPE_FTW) {
903 		txattr->user_win = true;
904 	}
905 }
906 EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
907 
908 static void init_winctx_for_txwin(struct vas_window *txwin,
909 			struct vas_tx_win_attr *txattr,
910 			struct vas_winctx *winctx)
911 {
912 	/*
	 * We first zero all fields and only set non-zero ones. The
	 * following fields are set to 0/false for the stated reasons:
915 	 *
916 	 *	->notify_os_intr_reg	In powernv, send intrs to HV
917 	 *	->rsvd_txbuf_count	Not supported yet.
918 	 *	->notify_disable	False for NX windows
919 	 *	->xtra_write		False for NX windows
920 	 *	->notify_early		NA for NX windows
921 	 *	->lnotify_lpid		NA for Tx windows
922 	 *	->lnotify_pid		NA for Tx windows
923 	 *	->lnotify_tid		NA for Tx windows
	 *	->tx_wcred_mode		Ignore for now for NX windows
	 *	->rx_wcred_mode		Ignore for now for NX windows
926 	 */
927 	memset(winctx, 0, sizeof(struct vas_winctx));
928 
929 	winctx->wcreds_max = txwin->wcreds_max;
930 
931 	winctx->user_win = txattr->user_win;
932 	winctx->nx_win = txwin->rxwin->nx_win;
933 	winctx->pin_win = txattr->pin_win;
934 	winctx->rej_no_credit = txattr->rej_no_credit;
935 	winctx->rsvd_txbuf_enable = txattr->rsvd_txbuf_enable;
936 
937 	winctx->rx_wcred_mode = txattr->rx_wcred_mode;
938 	winctx->tx_wcred_mode = txattr->tx_wcred_mode;
939 	winctx->rx_word_mode = txattr->rx_win_ord_mode;
940 	winctx->tx_word_mode = txattr->tx_win_ord_mode;
941 	winctx->rsvd_txbuf_count = txattr->rsvd_txbuf_count;
942 
943 	winctx->intr_disable = true;
944 	if (winctx->nx_win)
945 		winctx->data_stamp = true;
946 
947 	winctx->lpid = txattr->lpid;
948 	winctx->pidr = txattr->pidr;
949 	winctx->rx_win_id = txwin->rxwin->winid;
950 	/*
	 * If IRQ and fault window setup was successful, point this send
	 * window at the fault window so that faults can be handled.
953 	 */
954 	if (txwin->vinst->virq)
955 		winctx->fault_win_id = txwin->vinst->fault_win->winid;
956 
957 	winctx->dma_type = VAS_DMA_TYPE_INJECT;
958 	winctx->tc_mode = txattr->tc_mode;
959 	winctx->min_scope = VAS_SCOPE_LOCAL;
960 	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
961 	if (txwin->vinst->virq)
962 		winctx->irq_port = txwin->vinst->irq_port;
963 
964 	winctx->pswid = txattr->pswid ? txattr->pswid :
965 			encode_pswid(txwin->vinst->vas_id, txwin->winid);
966 }
967 
968 static bool tx_win_args_valid(enum vas_cop_type cop,
969 			struct vas_tx_win_attr *attr)
970 {
971 	if (attr->tc_mode != VAS_THRESH_DISABLED)
972 		return false;
973 
	if (cop >= VAS_COP_TYPE_MAX)
975 		return false;
976 
977 	if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
978 		return false;
979 
980 	if (attr->user_win) {
981 		if (attr->rsvd_txbuf_count)
982 			return false;
983 
984 		if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP &&
985 			cop != VAS_COP_TYPE_GZIP_HIPRI)
986 			return false;
987 	}
988 
989 	return true;
990 }
991 
992 struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
993 			struct vas_tx_win_attr *attr)
994 {
995 	int rc;
996 	struct vas_window *txwin;
997 	struct vas_window *rxwin;
998 	struct vas_winctx winctx;
999 	struct vas_instance *vinst;
1000 
1001 	trace_vas_tx_win_open(current, vasid, cop, attr);
1002 
1003 	if (!tx_win_args_valid(cop, attr))
1004 		return ERR_PTR(-EINVAL);
1005 
1006 	/*
1007 	 * If caller did not specify a vasid but specified the PSWID of a
1008 	 * receive window (applicable only to FTW windows), use the vasid
1009 	 * from that receive window.
1010 	 */
1011 	if (vasid == -1 && attr->pswid)
1012 		decode_pswid(attr->pswid, &vasid, NULL);
1013 
1014 	vinst = find_vas_instance(vasid);
1015 	if (!vinst) {
1016 		pr_devel("vasid %d not found!\n", vasid);
1017 		return ERR_PTR(-EINVAL);
1018 	}
1019 
1020 	rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
1021 	if (IS_ERR(rxwin)) {
1022 		pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
1023 		return rxwin;
1024 	}
1025 
1026 	txwin = vas_window_alloc(vinst);
1027 	if (IS_ERR(txwin)) {
1028 		rc = PTR_ERR(txwin);
1029 		goto put_rxwin;
1030 	}
1031 
1032 	txwin->cop = cop;
1033 	txwin->tx_win = 1;
1034 	txwin->rxwin = rxwin;
1035 	txwin->nx_win = txwin->rxwin->nx_win;
1036 	txwin->user_win = attr->user_win;
1037 	txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
1038 
1039 	init_winctx_for_txwin(txwin, attr, &winctx);
1040 
1041 	init_winctx_regs(txwin, &winctx);
1042 
1043 	/*
	 * If it is a kernel send window, map the paste address into the
	 * kernel's address space. For user windows, the user must issue an
	 * mmap() to map the window into their address space.
	 *
	 * NOTE: If the kernel ever resubmits a user CRB after handling a
	 *	 page fault, we will need to map it into the kernel as well.
1050 	 */
1051 	if (!txwin->user_win) {
1052 		txwin->paste_kaddr = map_paste_region(txwin);
1053 		if (IS_ERR(txwin->paste_kaddr)) {
1054 			rc = PTR_ERR(txwin->paste_kaddr);
1055 			goto free_window;
1056 		}
1057 	} else {
1058 		/*
		 * If interrupt handler or fault window setup failed, NX
		 * cannot report page faults to the kernel, so do not open
		 * a user space send window.
1062 		 */
1063 		if (!vinst->virq) {
1064 			rc = -ENODEV;
1065 			goto free_window;
1066 		}
1067 
1068 		/*
		 * A window opened by a child thread may not be closed when
		 * that thread exits. So take a reference to its pid and
		 * release it when the window is freed by the parent thread.
		 * Holding the pid reference ensures the pid is not reused
		 * - this matters only for multithreaded applications.
1075 		 */
1076 		txwin->pid = get_task_pid(current, PIDTYPE_PID);
1077 		/*
		 * Acquire a reference to the task's mm. Only the mm_count
		 * reference taken by mmgrab() below is kept; the mm_users
		 * reference from get_task_mm() is dropped again with
		 * mmput(). The matching mmdrop() is in vas_win_close().
1079 		 */
1080 		txwin->mm = get_task_mm(current);
1081 
1082 		if (!txwin->mm) {
1083 			put_pid(txwin->pid);
1084 			pr_err("VAS: pid(%d): mm_struct is not found\n",
1085 					current->pid);
1086 			rc = -EPERM;
1087 			goto free_window;
1088 		}
1089 
1090 		mmgrab(txwin->mm);
1091 		mmput(txwin->mm);
1092 		mm_context_add_vas_window(txwin->mm);
1093 		/*
		 * A process closes its windows during exit. In a
		 * multithreaded application, a child thread can open a
		 * window and exit without closing it, so take a tgid
		 * reference until the window is closed to make sure the
		 * tgid is not reused.
1099 		 */
1100 		txwin->tgid = find_get_pid(task_tgid_vnr(current));
1101 		/*
1102 		 * Even a process that has no foreign real address mapping can
1103 		 * use an unpaired COPY instruction (to no real effect). Issue
1104 		 * CP_ABORT to clear any pending COPY and prevent a covert
1105 		 * channel.
1106 		 *
1107 		 * __switch_to() will issue CP_ABORT on future context switches
1108 		 * if process / thread has any open VAS window (Use
1109 		 * current->mm->context.vas_windows).
1110 		 */
1111 		asm volatile(PPC_CP_ABORT);
1112 	}
1113 
1114 	set_vinst_win(vinst, txwin);
1115 
1116 	return txwin;
1117 
1118 free_window:
1119 	vas_window_free(txwin);
1120 
1121 put_rxwin:
1122 	put_rx_win(rxwin);
1123 	return ERR_PTR(rc);
1124 
1125 }
1126 EXPORT_SYMBOL_GPL(vas_tx_win_open);
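
/*
 * Sketch of the matching send-window setup a kernel user might do once
 * the receive window above exists. vasid is a placeholder; lpid/pidr are
 * read from the SPRs as a kernel window would:
 *
 *	struct vas_tx_win_attr txattr;
 *	struct vas_window *txwin;
 *
 *	vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
 *	txattr.lpid = mfspr(SPRN_LPID);
 *	txattr.pidr = mfspr(SPRN_PID);
 *
 *	txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
 *	if (IS_ERR(txwin))
 *		return PTR_ERR(txwin);
 */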
1127 
1128 int vas_copy_crb(void *crb, int offset)
1129 {
1130 	return vas_copy(crb, offset);
1131 }
1132 EXPORT_SYMBOL_GPL(vas_copy_crb);
1133 
1134 #define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
1135 int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
1136 {
1137 	int rc;
1138 	void *addr;
1139 	uint64_t val;
1140 
1141 	trace_vas_paste_crb(current, txwin);
1142 
1143 	/*
	 * Only NX windows are supported for now and the hardware assumes
	 * the report-enable flag is set for NX windows. Ensure software
1146 	 * complies too.
1147 	 */
1148 	WARN_ON_ONCE(txwin->nx_win && !re);
1149 
1150 	addr = txwin->paste_kaddr;
1151 	if (re) {
1152 		/*
		 * Set the REPORT_ENABLE bit (equivalent to writing
		 * to the 1K offset of the paste address).
1155 		 */
1156 		val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1);
1157 		addr += val;
1158 	}
1159 
1160 	/*
1161 	 * Map the raw CR value from vas_paste() to an error code (there
1162 	 * is just pass or fail for now though).
1163 	 */
1164 	rc = vas_paste(addr, offset);
1165 	if (rc == 2)
1166 		rc = 0;
1167 	else
1168 		rc = -EINVAL;
1169 
1170 	pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
1171 			read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
1172 
1173 	return rc;
1174 }
1175 EXPORT_SYMBOL_GPL(vas_paste_crb);
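
/*
 * Sketch of how a kernel sender might submit a request with the two
 * helpers above: stage the 128-byte CRB with the copy instruction, then
 * paste it to the send window with report-enable set (required for NX):
 *
 *	vas_copy_crb(crb, 0);
 *	rc = vas_paste_crb(txwin, 0, true);
 *	if (rc)
 *		return rc;	// -EINVAL: paste rejected, e.g. no credit
 */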
1176 
1177 /*
1178  * If credit checking is enabled for this window, poll for the return
1179  * of window credits (i.e for NX engines to process any outstanding CRBs).
1180  * Since NX-842 waits for the CRBs to be processed before closing the
1181  * window, we should not have to wait for too long.
1182  *
1183  * TODO: We retry in 10ms intervals now. We could/should probably peek at
1184  *	the VAS_LRFIFO_PUSH_OFFSET register to get an estimate of pending
1185  *	CRBs on the FIFO and compute the delay dynamically on each retry.
1186  *	But that is not really needed until we support NX-GZIP access from
1187  *	user space. (NX-842 driver waits for CSB and Fast thread-wakeup
1188  *	doesn't use credit checking).
1189  */
1190 static void poll_window_credits(struct vas_window *window)
1191 {
1192 	u64 val;
1193 	int creds, mode;
1194 	int count = 0;
1195 
1196 	val = read_hvwc_reg(window, VREG(WINCTL));
1197 	if (window->tx_win)
1198 		mode = GET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val);
1199 	else
1200 		mode = GET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val);
1201 
1202 	if (!mode)
1203 		return;
1204 retry:
1205 	if (window->tx_win) {
1206 		val = read_hvwc_reg(window, VREG(TX_WCRED));
1207 		creds = GET_FIELD(VAS_TX_WCRED, val);
1208 	} else {
1209 		val = read_hvwc_reg(window, VREG(LRX_WCRED));
1210 		creds = GET_FIELD(VAS_LRX_WCRED, val);
1211 	}
1212 
1213 	/*
	 * It takes around a few milliseconds to complete all pending
	 * requests and return the credits.
	 * TODO: Scan the fault FIFO, invalidate CRBs that point to this
	 *       window and issue CRB Kill to stop all pending requests.
	 *       Needed only if there is a bug in NX or in kernel fault
	 *       handling.
1219 	 */
1220 	if (creds < window->wcreds_max) {
1221 		val = 0;
1222 		set_current_state(TASK_UNINTERRUPTIBLE);
1223 		schedule_timeout(msecs_to_jiffies(10));
1224 		count++;
1225 		/*
		 * The process cannot close the send window until all
		 * credits are returned.
1228 		 */
1229 		if (!(count % 1000))
1230 			pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
1231 				vas_window_pid(window), window->winid,
1232 				creds, count);
1233 
1234 		goto retry;
1235 	}
1236 }
1237 
1238 /*
1239  * Wait for the window to go to "not-busy" state. It should only take a
 * short time to queue a CRB, so the window should not be busy for too
 * long. We retry in 10ms intervals, matching the schedule_timeout() below.
1242  */
1243 static void poll_window_busy_state(struct vas_window *window)
1244 {
1245 	int busy;
1246 	u64 val;
1247 	int count = 0;
1248 
1249 retry:
1250 	val = read_hvwc_reg(window, VREG(WIN_STATUS));
1251 	busy = GET_FIELD(VAS_WIN_BUSY, val);
1252 	if (busy) {
1253 		val = 0;
1254 		set_current_state(TASK_UNINTERRUPTIBLE);
1255 		schedule_timeout(msecs_to_jiffies(10));
1256 		count++;
1257 		/*
		 * It takes around a few milliseconds to process all
		 * pending requests.
1260 		 */
1261 		if (!(count % 1000))
1262 			pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
1263 				vas_window_pid(window), window->winid, count);
1264 
1265 		goto retry;
1266 	}
1267 }
1268 
1269 /*
1270  * Have the hardware cast a window out of cache and wait for it to
1271  * be completed.
1272  *
1273  * NOTE: It can take a relatively long time to cast the window context
1274  *	out of the cache. It is not strictly necessary to cast out if:
1275  *
1276  *	- we clear the "Pin Window" bit (so hardware is free to evict)
1277  *
1278  *	- we re-initialize the window context when it is reassigned.
1279  *
 *	We do the former in vas_win_close() and the latter when a window
 *	is reopened (init_winctx_regs()). So we ignore the cast-out for
 *	now. We can add it as needed. If
1282  *	casting out becomes necessary we should consider offloading the
1283  *	job to a worker thread, so the window close can proceed quickly.
1284  */
1285 static void poll_window_castout(struct vas_window *window)
1286 {
1287 	/* stub for now */
1288 }
1289 
1290 /*
1291  * Unpin and close a window so no new requests are accepted and the
1292  * hardware can evict this window from cache if necessary.
1293  */
1294 static void unpin_close_window(struct vas_window *window)
1295 {
1296 	u64 val;
1297 
1298 	val = read_hvwc_reg(window, VREG(WINCTL));
1299 	val = SET_FIELD(VAS_WINCTL_PIN, val, 0);
1300 	val = SET_FIELD(VAS_WINCTL_OPEN, val, 0);
1301 	write_hvwc_reg(window, VREG(WINCTL), val);
1302 }
1303 
1304 /*
1305  * Close a window.
1306  *
1307  * See Section 1.12.1 of VAS workbook v1.05 for details on closing window:
1308  *	- Disable new paste operations (unmap paste address)
1309  *	- Poll for the "Window Busy" bit to be cleared
1310  *	- Clear the Open/Enable bit for the Window.
1311  *	- Poll for return of window Credits (implies FIFO empty for Rx win?)
1312  *	- Unpin and cast window context out of cache
1313  *
 * Besides the hardware, the kernel has some bookkeeping of course.
1315  */
1316 int vas_win_close(struct vas_window *window)
1317 {
1318 	if (!window)
1319 		return 0;
1320 
1321 	if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
1322 		pr_devel("Attempting to close an active Rx window!\n");
1323 		WARN_ON_ONCE(1);
1324 		return -EBUSY;
1325 	}
1326 
1327 	unmap_paste_region(window);
1328 
1329 	poll_window_busy_state(window);
1330 
1331 	unpin_close_window(window);
1332 
1333 	poll_window_credits(window);
1334 
1335 	clear_vinst_win(window);
1336 
1337 	poll_window_castout(window);
1338 
1339 	/* if send window, drop reference to matching receive window */
1340 	if (window->tx_win) {
1341 		if (window->user_win) {
			/* Drop references to pid, tgid and mm */
1343 			put_pid(window->pid);
1344 			put_pid(window->tgid);
1345 			if (window->mm) {
1346 				mm_context_remove_vas_window(window->mm);
1347 				mmdrop(window->mm);
1348 			}
1349 		}
1350 		put_rx_win(window->rxwin);
1351 	}
1352 
1353 	vas_window_free(window);
1354 
1355 	return 0;
1356 }
1357 EXPORT_SYMBOL_GPL(vas_win_close);
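
/*
 * Teardown is the reverse of the open sketches above: close the send
 * window first (an rx window with active tx windows returns -EBUSY),
 * then the receive window:
 *
 *	vas_win_close(txwin);
 *	vas_win_close(rxwin);
 */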
1358 
1359 /*
 * Return a credit for the given window.
 * Send windows and the fault window use the credit mechanism as follows:
 *
 * Send windows:
 * - The default number of credits available for each send window is
 *   1024, meaning 1024 requests can be issued asynchronously at the
 *   same time. If no credit is available, the request is returned
 *   with RMA_Busy.
 * - One credit is taken when an NX request is issued.
 * - The credit is returned after NX has processed that request.
 * - If NX encounters a translation error, the kernel returns the
 *   credit on the specific send window after processing the fault CRB.
 *
 * Fault window:
 * - The total number of credits available is FIFO_SIZE/CRB_SIZE,
 *   i.e. 4MB/128 in the current implementation. If no credit is
 *   available, RMA_Reject is returned.
 * - A credit is taken when NX pastes a CRB into the fault FIFO.
 * - The kernel returns the credit on the fault window after reading an
 *   entry from the fault FIFO.
1380  */
1381 void vas_return_credit(struct vas_window *window, bool tx)
1382 {
1383 	uint64_t val;
1384 
1385 	val = 0ULL;
1386 	if (tx) { /* send window */
1387 		val = SET_FIELD(VAS_TX_WCRED, val, 1);
1388 		write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val);
1389 	} else {
1390 		val = SET_FIELD(VAS_LRX_WCRED, val, 1);
1391 		write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val);
1392 	}
1393 }
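
/*
 * For example (a sketch, not a quote of the fault handling code): after
 * processing a fault CRB the kernel can return the credit consumed on
 * the faulting send window, and after reading an entry from the fault
 * FIFO it can return a credit on the fault window itself:
 *
 *	vas_return_credit(txwin, true);		// send window credit
 *	vas_return_credit(fault_win, false);	// fault (rx) window credit
 */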
1394 
1395 struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
1396 		uint32_t pswid)
1397 {
1398 	struct vas_window *window;
1399 	int winid;
1400 
1401 	if (!pswid) {
1402 		pr_devel("%s: called for pswid 0!\n", __func__);
1403 		return ERR_PTR(-ESRCH);
1404 	}
1405 
1406 	decode_pswid(pswid, NULL, &winid);
1407 
1408 	if (winid >= VAS_WINDOWS_PER_CHIP)
1409 		return ERR_PTR(-ESRCH);
1410 
1411 	/*
	 * If the application closes the window before the hardware
	 * returns the fault CRB, vas_win_close() waits for the pending
	 * requests, so the window must still be active and the process
	 * alive.
	 *
	 * If it is a kernel process, we should not get any faults and
	 * should not get here.
1419 	 */
1420 	window = vinst->windows[winid];
1421 
1422 	if (!window) {
1423 		pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n",
1424 			winid, pswid, vinst);
1425 		return NULL;
1426 	}
1427 
1428 	/*
	 * Do some sanity checks on the decoded window. The window should be
	 * an NX GZIP user send window. FTW windows should not incur faults
1431 	 * since their CRBs are ignored (not queued on FIFO or processed
1432 	 * by NX).
1433 	 */
1434 	if (!window->tx_win || !window->user_win || !window->nx_win ||
1435 			window->cop == VAS_COP_TYPE_FAULT ||
1436 			window->cop == VAS_COP_TYPE_FTW) {
1437 		pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
1438 			winid, window->tx_win, window->user_win,
1439 			window->nx_win, window->cop);
1440 		WARN_ON(1);
1441 	}
1442 
1443 	return window;
1444 }
1445