/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

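/* op_mod values for the MANAGE_PAGES command: the driver reports it cannot
 * give pages, gives pages to the firmware, or takes them back.
 */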
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

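/* One DMA-mapped host page handed out to firmware. Each PAGE_SIZE page is
 * carved into MLX5_NUM_4K_IN_PAGE chunks of MLX5_ADAPTER_PAGE_SIZE;
 * @bitmask tracks which chunks are still free and @free_count caches their
 * number. Pages with free chunks are linked on dev->priv.free_list, and all
 * pages of a function live in an rb-tree keyed by @addr (the DMA address).
 */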
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

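/* Pack the 16-bit function id and the embedded-CPU flag into the single
 * 32-bit key used for dev->priv.page_root_xa lookups;
 * mlx5_get_func_id()/mlx5_get_ec_function() undo the packing.
 */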
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
	if (!func_id)
		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;

	return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
}

static u32 mlx5_get_ec_function(u32 function)
{
	return function >> 16;
}

static u32 mlx5_get_func_id(u32 function)
{
	return function & 0xffff;
}

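/* Look up the rb-tree root for @function in the xarray, lazily allocating
 * and inserting an empty root on first use.
 */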
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

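/* Track a freshly mapped host page in the per-function rb-tree, with all
 * of its 4K chunks marked free.
 */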
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

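/* Carve a free 4K chunk for @function out of a page on the free list.
 * Returns -ENOMEM when no tracked page of this function has a free chunk,
 * in which case the caller falls back to alloc_system_page().
 */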
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
			       fp->addr, n, fp->bitmask,  MLX5_NUM_4K_IN_PAGE);
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

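/* Return a 4K chunk to its owning fw_page. The page goes back on the free
 * list when its first chunk is freed, and is unmapped and released via
 * free_fwp() once all of its chunks are free again.
 */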
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

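/* Allocate and DMA-map one host page near the device's NUMA node and start
 * tracking it. A mapping that lands at bus address 0 is mapped again (and
 * the zero mapping released at the end), since the firmware cannot use a
 * page at physical address 0.
 */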
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

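/* Satisfy a firmware request for @npages pages: gather 4K chunks (allocating
 * new system pages as needed) and post them with MANAGE_PAGES op_mod GIVE.
 * On failure the chunks are returned to the pool, and for event-triggered
 * requests that did not already fail inside the firmware, the firmware is
 * notified via MLX5_PAGES_CANT_GIVE.
 */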
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	int notify_fail = event;
	u16 func_type;
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err) {
				dev->priv.fw_pages_alloc_failed += (npages - i);
				goto out_4k;
			}

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
	if (err == -EREMOTEIO) {
		notify_fail = 0;
		/* if triggered by FW and failed by FW ignore */
		if (event) {
			err = 0;
			goto out_dropped;
		}
	}
	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_dropped;
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] += npages;
	dev->priv.fw_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_dropped:
	dev->priv.give_pages_dropped += npages;
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

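/* Drop every page tracked for @function locally, without a reclaim command;
 * used when the firmware asks the driver to release all pages at once.
 */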
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;
	u16 func_type;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= npages;
	dev->priv.fw_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

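/* Report up to @npages in-use 4K chunks of @fwp (the clear bits in its
 * bitmask) into the MANAGE_PAGES output at @index; returns how many
 * entries were written.
 */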
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

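/* Execute a MANAGE_PAGES(TAKE) command. If the command interface is down,
 * fabricate a successful reply from the rb-tree instead, so teardown can
 * still reclaim the pages the driver handed out.
 */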
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_do(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

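/* Ask the firmware to return up to @npages pages for @func_id and free each
 * reclaimed 4K chunk back to the pool, updating the page counters.
 */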
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u16 func_type;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		npages = MLX5_GET(manage_pages_in, in, input_num_entries);
		dev->priv.reclaim_pages_discard += npages;
	}
	/* if triggered by FW event and failed by FW then ignore */
	if (event && err == -EREMOTEIO) {
		err = 0;
		goto out_free;
	}

	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= num_claimed;
	dev->priv.fw_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    true, req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

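/* The high bits of the 16-bit ec_function field in the page-request EQE
 * carry flags rather than part of a function id.
 */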
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

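/* Number of page entries that fit in a reclaim command reply using
 * MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */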
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u32 function)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		u32 ec_function = mlx5_get_ec_function(function);
		u32 function_id = mlx5_get_func_id(function);
		int nclaimed;
		int err;

		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
				    &nclaimed, false, ec_function);
		if (err) {
			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
				       err, function_id, ec_function);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.page_counters[MLX5_VF],
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_VF]);
	WARN(dev->priv.page_counters[MLX5_HOST_PF],
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_HOST_PF]);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);
	mlx5_pages_debugfs_init(dev);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_pages_debugfs_cleanup(dev);
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

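/* Poll (sleeping 50ms between checks) until the page counter pointed to by
 * @pages drops to zero, restarting the timeout whenever progress is made.
 */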
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}