/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

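/*
 * Return the current queue page and advance the iterator by one hardware
 * queue page; returns NULL once all pages have been handed out, or if the
 * iterator has left EHEA_PAGESIZE alignment (which indicates a bug).
 */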
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on page boundary\n");
		retvalue = NULL;
	}
	return retvalue;
}

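/*
 * Allocate the backing store for a hardware queue: nr_of_pages queue pages
 * of pagesize bytes each, carved out of zeroed kernel pages, with the
 * per-page pointers kept in queue->queue_pages.
 */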
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			  const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	/* zeroed allocation, so the error path below can spot unfilled slots */
	queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}

static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;

	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}

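/*
 * Allocate a completion queue: get the firmware resource, build the backing
 * hw_queue and register each queue page with the hypervisor. Intermediate
 * register calls are expected to return H_PAGE_REGISTERED; the final call
 * must return H_SUCCESS and leave no unregistered page behind.
 */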
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	u64 hret, rpage;
	u32 counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		/* hret is unsigned; cast before testing for an error code */
		if ((s64)hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}

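/*
 * Allocate an event queue of the given type and register its pages with
 * the hypervisor, mirroring the page-registration protocol used for CQs.
 */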
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

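/*
 * Fetch the next valid EQ entry, if any; eq->spinlock serializes pollers
 * against each other and against the destroy path.
 */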
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		/* hret is unsigned; cast before testing for an error code */
		if ((s64)hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

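/*
 * The firmware reports WQE sizes as an encoded exponent: the size in
 * bytes is 128 << wqe_enc_size (0 -> 128, 1 -> 256, 2 -> 512, ...).
 */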
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}

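/*
 * Allocate a queue pair: one send queue plus up to three receive queues,
 * depending on init_attr->rq_count. Each queue is backed by its own
 * hw_queue whose pages are registered via ehea_qp_alloc_register().
 */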
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

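/*
 * The busmap is a three-level table (top/dir/idx) that maps each
 * EHEA_SECTSIZE memory section to a bus address usable by the adapter.
 * A section number is split into the three indices with the shifts and
 * the mask used below.
 */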
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

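/*
 * Walk the whole busmap and assign contiguous bus addresses, starting at
 * EHEA_BUSMAP_START, to every section still marked valid. Directory nodes
 * left without valid sections are freed, as are top nodes that held no
 * directories.
 */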
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}

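/*
 * Mark the sections covering [pfn, pfn + nr_pages) as valid (add) or
 * invalid (remove), growing the table on demand, then let
 * ehea_rebuild_busmap() assign the actual bus addresses.
 */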
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

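/*
 * A pfn starts a hugepage if it is aligned per EHEA_HUGEPAGE_PFN_MASK and
 * heads a compound page whose total size matches EHEA_HUGEPAGESHIFT (the
 * 16GB hugepages checked for in ehea_create_busmap_callback() below).
 */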
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else {
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
		}
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				   ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}

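/*
 * Translate a kernel virtual address into the bus address the adapter
 * uses for the enclosing memory section, or EHEA_INVAL_ADDR if the
 * section is not mapped.
 */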
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

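/*
 * Inverse of the lookup above: recombine top/dir/idx into a section
 * number and return the kernel virtual address of the section base.
 */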
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}

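/*
 * Register one memory section with the firmware MR, EHEA_MAX_RPAGE
 * physical page addresses per hypervisor call; pt is a scratch page
 * holding the page list handed to the hcall.
 */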
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

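/*
 * Register all mapped kernel memory (as recorded in the busmap) as one
 * memory region so the adapter can DMA to and from it. The MR covers
 * ehea_mr_len bytes starting at EHEA_BUSMAP_START.
 */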
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

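/*
 * Dump the error data block returned by the firmware. The word offsets
 * used below (6 for AER, 12 for AERR, 22 for the port) follow what the
 * format strings assume about the eHEA error-data layout.
 */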
static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else {
		pr_err("Error data could not be fetched: %llX\n", res_handle);
	}

	free_page((unsigned long)rblock);
out:
	return type;
}