/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

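/*
 * Return the current queue page and advance the iterator by one
 * hardware page.  Returns NULL once the iterator has walked past the
 * end of the queue, or if the page is not EHEA_PAGESIZE aligned.
 */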
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}

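/*
 * Allocate the backing store for a hardware queue.  Each kernel page
 * (PAGE_SIZE) is carved into one or more hardware queue pages of
 * @pagesize bytes; pointers to the queue pages are kept in
 * queue->queue_pages.  Returns 0 on success, -EINVAL on a page size
 * mismatch and -ENOMEM when an allocation fails.
 */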
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			  const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
					   GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}

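/*
 * Free the backing store allocated by hw_queue_ctor().  Only every
 * pages_per_kpage-th entry starts a kernel page of its own, so only
 * those entries are handed back to free_page().
 */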
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;

	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}

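/*
 * Allocate a completion queue: reserve the firmware resource, build
 * the backing hw_queue and register each queue page with the
 * hypervisor.  Intermediate pages are expected to complete with
 * H_PAGE_REGISTERED; the final page completes with H_SUCCESS and must
 * exhaust the page iterator.  Returns the new CQ or NULL on failure.
 *
 * Typical use (a sketch only; assumes an initialized adapter and EQ,
 * and hypothetical variables nr_cqe and cq_token):
 *
 *	struct ehea_cq *cq;
 *
 *	cq = ehea_create_cq(adapter, nr_cqe, eq->fw_handle, cq_token);
 *	if (!cq)
 *		return -ENOMEM;
 */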
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	u64 hret, rpage;
	u32 counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if ((long)hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

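/*
 * Release the firmware resource backing @cq and, on success, free the
 * hw_queue and the CQ structure itself.  @force selects between
 * NORMAL_FREE and FORCE_FREE semantics of ehea_h_free_resource().
 */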
static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}

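/*
 * Allocate an event queue, mirroring ehea_create_cq(): reserve the
 * firmware resource, construct the backing hw_queue and register its
 * pages with the hypervisor one at a time.  Returns the new EQ or
 * NULL on failure.
 */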
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

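/*
 * Fetch the next valid EQ entry, or NULL if none is pending.  The
 * per-EQ spinlock serializes pollers against the resource free path.
 */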
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if ((long)hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

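/*
 * WQE sizes are firmware-encoded as a power-of-two multiple of 128
 * bytes: encoding 0 maps to 128 bytes, 1 to 256, 2 to 512, and so on.
 */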
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}

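/*
 * Allocate a queue pair: reserve the firmware QP resource, then build
 * and register the send queue, receive queue 1 and, depending on
 * init_attr->rq_count, receive queues 2 and 3.  Returns the new QP or
 * NULL on failure.
 */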
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

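/*
 * Release the firmware QP resource and, on success, free all hardware
 * queues that were created for it as well as the QP structure.
 */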
static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

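/*
 * The busmap translates kernel memory sections into the contiguous
 * bus address space starting at EHEA_BUSMAP_START.  It is a three
 * level radix tree: a section index is split into a top, a dir and an
 * idx component (via EHEA_TOP_INDEX_SHIFT, EHEA_DIR_INDEX_SHIFT and
 * EHEA_INDEX_MASK), and each populated ent[] slot holds the bus
 * address assigned to one EHEA_SECTSIZE sized section.
 */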
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

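/*
 * Walk the whole busmap and hand out ascending bus addresses,
 * starting at EHEA_BUSMAP_START, to every section still marked
 * valid.  Dir and top nodes left without a single valid entry are
 * freed along the way, so the map stays compact after removals.
 */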
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}

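/*
 * Mark the sections covering @nr_pages pages starting at @pfn as
 * valid (add) or invalid (remove) and rebuild the bus address
 * assignment.  Runs under ehea_busmap_mutex, taken by the wrappers
 * below and by the initial busmap creation.
 */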
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

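/*
 * A chunk counts as a hugepage when @pfn sits on a hugepage boundary
 * and the backing compound page has exactly the 16GB hugepage order
 * (EHEA_HUGEPAGESHIFT).
 */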
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

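/*
 * walk_system_ram_range() callback: small chunks are added to the
 * busmap wholesale, while chunks of at least EHEA_HUGEPAGE_SIZE are
 * scanned section by section so that 16GB hugepages can be skipped
 * and only the regular memory around them is registered.
 */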
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else {
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
		}
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

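/*
 * Build the busmap covering all of system RAM by walking every RAM
 * range; ehea_create_busmap_callback() decides how each chunk is
 * added.
 */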
int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				   ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}

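/*
 * Translate a kernel virtual address into the bus address that was
 * assigned to its memory section, preserving the offset within the
 * section.  Returns EHEA_INVAL_ADDR if the section is not mapped.
 */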
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}

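/*
 * Register one section's worth of pages for @mr: the physical
 * addresses of EHEA_MAX_RPAGE pages at a time are written into the
 * scratch page table @pt, whose physical address is then handed to
 * the hypervisor.
 */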
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}
static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

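/*
 * Register all kernel memory described by the busmap as one memory
 * region: allocate the MR resource for the whole bus address range,
 * then feed the hypervisor the physical page lists of every mapped
 * section via a scratch page table.  The resulting MR starts at
 * EHEA_BUSMAP_START and spans ehea_mr_len bytes.
 */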
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

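/*
 * Fetch and dump the error data block for @res_handle.  On success
 * the AER and AERR registers are copied to @aer and @aerr and the
 * resource type from the block is returned; 0 is returned when no
 * data could be read.
 */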
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else {
		pr_err("Error data could not be fetched: %llX\n", res_handle);
	}

	free_page((unsigned long)rblock);
out:
	return type;
}