/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

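/*
 * Firmware page allocator: device firmware borrows host memory in
 * MLX5_ADAPTER_PAGE_SIZE (4K) units. The driver gives pages to firmware
 * on request and reclaims them on teardown or when firmware returns
 * them. Pages are tracked per function in an rb-tree keyed by DMA
 * address, with the per-function trees stored in an xarray.
 */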
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

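/* op_mod values for the MANAGE_PAGES command */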
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

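/* A page request from firmware, deferred from EQ context to a workqueue */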
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

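/*
 * One host page given to firmware. The host page is carved into
 * MLX5_NUM_4K_IN_PAGE device-sized (4K) chunks; a set bit in @bitmask
 * means the corresponding chunk is still free. Pages with free chunks
 * sit on dev->priv.free_list, and every page is indexed in the
 * per-function rb-tree, keyed by DMA address.
 */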
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int		free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

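/* Pack function id and ECPF flag into a single lookup key for page_root_xa */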
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

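/*
 * Return the page rb-tree root for @function, allocating an empty one
 * and publishing it in the xarray on first use.
 */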
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

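/* Start tracking a newly mapped host page in the per-function rb-tree */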
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

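/* Carve one free 4K chunk for @function out of the free list */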
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned int n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

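/* Stop tracking @fwp, unmap it and return the host page to the kernel */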
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
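	/*
	 * Once every chunk is free again, release the whole host page; it
	 * is already on the free list unless this was its only 4K chunk
	 * (MLX5_NUM_4K_IN_PAGE == 1).
	 */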
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

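/*
 * Allocate and DMA-map a fresh host page for @function. Firmware cannot
 * use a page at bus address 0, so such a mapping is set aside and a
 * second mapping is made; the zero mapping is released before returning.
 */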
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

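/* Tell firmware we cannot supply the pages it asked for */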
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

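/*
 * Hand @npages 4K chunks to firmware on behalf of @func_id. Free chunks
 * are used first; fresh system pages are allocated once the free list
 * runs dry. On failure everything is rolled back and, if @notify_fail
 * is set, firmware is told via MLX5_PAGES_CANT_GIVE.
 */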
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "kvzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

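/*
 * Drop every page tracked for @func_id locally, without a MANAGE_PAGES
 * exchange; used when firmware asks the driver to release all pages.
 */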
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

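/*
 * Write up to @npages in-use (clear-bit) chunks of @fwp into the
 * reclaim output starting at @index; returns the number written.
 */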
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

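/*
 * Execute a reclaim command, or, if the command interface is down,
 * synthesize the firmware answer from the rb-tree so teardown can still
 * release the pages.
 */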
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

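/* Ask firmware to return @npages taken for @func_id and free what comes back */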
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.host_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

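/* Work handler: dispatch a queued request to release/reclaim/give pages */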
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

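/* Flags carried in the high bits of eqe->data.req_pages.ec_function */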
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

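/* EQ notifier: turn a PAGE_REQUEST event into deferred work */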
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

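/*
 * Number of page addresses that fit in the immediate command output
 * plus MLX5_BLKS_FOR_RECLAIM_PAGES command mailbox blocks.
 */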
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

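/*
 * Reclaim every page in @root, extending the deadline whenever progress
 * is made and giving up once firmware stops returning pages.
 */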
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.host_pf_pages,
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.host_pf_pages);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

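/*
 * Poll the given page counter (updated by the reclaim path) until it
 * reaches zero, extending the deadline whenever the count drops.
 */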
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage\n");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}