// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/module.h>
#include <linux/pci.h>

#include "mana.h"

static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
	return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
	return readq(g->bar0_va + offset);
}

static void mana_gd_init_registers(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	if (gc->num_msix_usable > resp.max_msix)
		gc->num_msix_usable = resp.max_msix;

	if (gc->num_msix_usable <= 1)
		return -ENOSPC;

	gc->max_num_queues = num_online_cpus();
	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
		gc->max_num_queues = MANA_MAX_NUM_QUEUES;

	if (gc->max_num_queues > resp.max_eq)
		gc->max_num_queues = resp.max_eq;

	if (gc->max_num_queues > resp.max_cq)
		gc->max_num_queues = resp.max_cq;

	if (gc->max_num_queues > resp.max_sq)
		gc->max_num_queues = resp.max_sq;

	if (gc->max_num_queues > resp.max_rq)
		gc->max_num_queues = resp.max_rq;

	return 0;
}

static int mana_gd_detect_devices(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_list_devices_resp resp = {};
	struct gdma_general_req req = {};
	struct gdma_dev_id dev;
	u32 i, max_num_devs;
	u16 dev_type;
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
			     sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);

	for (i = 0; i < max_num_devs; i++) {
		dev = resp.devs[i];
		dev_type = dev.type;

		/* HWC is already detected in mana_hwc_create_channel(). */
		if (dev_type == GDMA_DEVICE_HWC)
			continue;

		if (dev_type == GDMA_DEVICE_MANA) {
			gc->mana.gdma_context = gc;
			gc->mana.dev_id = dev;
		}
	}

	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
{
	dma_addr_t dma_handle;
	void *buf;

	if (length < PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	gmi->dev = gc->dev;
	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	gmi->dma_handle = dma_handle;
	gmi->virt_addr = buf;
	gmi->length = length;

	return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
			  gmi->dma_handle);
}

static int mana_gd_create_hw_eq(struct gdma_context *gc,
				struct gdma_queue *queue)
{
	struct gdma_create_queue_resp resp = {};
	struct gdma_create_queue_req req = {};
	int err;

	if (queue->type != GDMA_EQ)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doorbell_id = queue->gdma_dev->doorbell;
	req.gdma_region = queue->mem_info.gdma_region;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
	return 0;
}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	struct gdma_disable_queue_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	WARN_ON(queue->type != GDMA_EQ);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.queue_index = queue->id;
	req.alloc_res_id_on_creation = 1;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}

#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8
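
/*
 * Write one 64-bit doorbell entry. The doorbell page is selected by
 * db_index (each page is gc->db_page_size bytes), and the per-queue-type
 * offsets above locate the SQ/RQ/CQ/EQ doorbell registers within it.
 */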
static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
				  enum gdma_queue_type q_type, u32 qid,
				  u32 tail_ptr, u8 num_req)
{
	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
	union gdma_doorbell_entry e = {};

	switch (q_type) {
	case GDMA_EQ:
		e.eq.id = qid;
		e.eq.tail_ptr = tail_ptr;
		e.eq.arm = num_req;

		addr += DOORBELL_OFFSET_EQ;
		break;

	case GDMA_CQ:
		e.cq.id = qid;
		e.cq.tail_ptr = tail_ptr;
		e.cq.arm = num_req;

		addr += DOORBELL_OFFSET_CQ;
		break;

	case GDMA_RQ:
		e.rq.id = qid;
		e.rq.tail_ptr = tail_ptr;
		e.rq.wqe_cnt = num_req;

		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_SQ:
		e.sq.id = qid;
		e.sq.tail_ptr = tail_ptr;

		addr += DOORBELL_OFFSET_SQ;
		break;

	default:
		WARN_ON(1);
		return;
	}
	/* Ensure all writes are done before ringing the doorbell */
	wmb();

	writeq(e.as_uint64, addr);
}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}
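
/*
 * Re-arm a CQ so the hardware will raise another completion event. Note
 * that the head value written to the doorbell includes the owner bits:
 * it is taken modulo (num_cqe << GDMA_CQE_OWNER_BITS), not modulo num_cqe.
 */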
void mana_gd_arm_cq(struct gdma_queue *cq)
{
	struct gdma_context *gc = cq->gdma_dev->gdma_context;
	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
			      head, SET_ARM_BIT);
}

static void mana_gd_process_eqe(struct gdma_queue *eq)
{
	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
	struct gdma_context *gc = eq->gdma_dev->gdma_context;
	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
	union gdma_eqe_info eqe_info;
	enum gdma_eqe_type type;
	struct gdma_event event;
	struct gdma_queue *cq;
	struct gdma_eqe *eqe;
	u32 cq_id;

	eqe = &eq_eqe_ptr[head];
	eqe_info.as_uint32 = eqe->eqe_info;
	type = eqe_info.type;

	switch (type) {
	case GDMA_EQE_COMPLETION:
		cq_id = eqe->details[0] & 0xFFFFFF;
		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
			break;

		cq = gc->cq_table[cq_id];
		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
			break;

		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);

		break;

	case GDMA_EQE_TEST_EVENT:
		gc->test_event_eq_id = eq->id;
		complete(&gc->eq_test_event);
		break;

	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
	case GDMA_EQE_HWC_INIT_DATA:
	case GDMA_EQE_HWC_INIT_DONE:
		if (!eq->eq.callback)
			break;

		event.type = type;
		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
		eq->eq.callback(eq->eq.context, eq, &event);
		break;

	default:
		break;
	}
}
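
/*
 * Drain pending EQEs and update the EQ doorbell. Each EQE carries owner
 * bits that encode how many times the ring has wrapped: if an entry's
 * owner bits equal the previous wrap count, the entry is stale and there
 * are no more events; if they match neither the previous nor the current
 * count, the hardware has lapped the driver and an overflow is reported.
 */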
static void mana_gd_process_eq_events(void *arg)
{
	u32 owner_bits, new_bits, old_bits;
	union gdma_eqe_info eqe_info;
	struct gdma_eqe *eq_eqe_ptr;
	struct gdma_queue *eq = arg;
	struct gdma_context *gc;
	struct gdma_eqe *eqe;
	unsigned int arm_bit;
	u32 head, num_eqe;
	int i;

	gc = eq->gdma_dev->gdma_context;

	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
	eq_eqe_ptr = eq->queue_mem_ptr;

	/* Process up to 5 EQEs at a time, and update the HW head. */
	for (i = 0; i < 5; i++) {
		eqe = &eq_eqe_ptr[eq->head % num_eqe];
		eqe_info.as_uint32 = eqe->eqe_info;
		owner_bits = eqe_info.owner_bits;

		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
		/* No more entries */
		if (owner_bits == old_bits)
			break;

		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
		if (owner_bits != new_bits) {
			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
			break;
		}

		mana_gd_process_eqe(eq);

		eq->head++;
	}

	/* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */
	if (mana_gd_is_hwc(eq->gdma_dev)) {
		arm_bit = SET_ARM_BIT;
	} else if (eq->eq.work_done < eq->eq.budget &&
		   napi_complete_done(&eq->eq.napi, eq->eq.work_done)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
			      head, arm_bit);
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi);

	eq->eq.work_done = 0;
	eq->eq.budget = budget;

	mana_gd_process_eq_events(eq);

	return min(eq->eq.work_done, budget);
}

static void mana_gd_schedule_napi(void *arg)
{
	struct gdma_queue *eq = arg;
	struct napi_struct *napi;

	napi = &eq->eq.napi;
	napi_schedule_irqoff(napi);
}

static int mana_gd_register_irq(struct gdma_queue *queue,
				const struct gdma_queue_spec *spec)
{
	struct gdma_dev *gd = queue->gdma_dev;
	bool is_mana = mana_gd_is_mana(gd);
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msi_index;
	unsigned long flags;
	int err;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	spin_lock_irqsave(&r->lock, flags);

	msi_index = find_first_zero_bit(r->map, r->size);
	if (msi_index >= r->size) {
		err = -ENOSPC;
	} else {
		bitmap_set(r->map, msi_index, 1);
		queue->eq.msix_index = msi_index;
		err = 0;
	}

	spin_unlock_irqrestore(&r->lock, flags);

	if (err)
		return err;

	WARN_ON(msi_index >= gc->num_msix_usable);

	gic = &gc->irq_contexts[msi_index];

	if (is_mana) {
		netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&queue->eq.napi);
	}

	WARN_ON(gic->handler || gic->arg);

	gic->arg = queue;

	if (is_mana)
		gic->handler = mana_gd_schedule_napi;
	else
		gic->handler = mana_gd_process_eq_events;

	return 0;
}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msix_index;
	unsigned long flags;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	/* At most num_online_cpus() + 1 interrupts are used. */
	msix_index = queue->eq.msix_index;
	if (WARN_ON(msix_index >= gc->num_msix_usable))
		return;

	gic = &gc->irq_contexts[msix_index];
	gic->handler = NULL;
	gic->arg = NULL;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(r->map, msix_index, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
}
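
/*
 * Verify that an EQ can deliver events: ask the device to generate a test
 * event on the given EQ and wait for it. gc->eq_test_event is completed
 * from mana_gd_process_eqe() when the GDMA_EQE_TEST_EVENT EQE arrives.
 */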
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};
	struct device *dev = gc->dev;
	int err;

	mutex_lock(&gc->eq_test_event_mutex);

	init_completion(&gc->eq_test_event);
	gc->test_event_eq_id = INVALID_QUEUE_ID;

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = eq->gdma_dev->dev_id;
	req.queue_index = eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		dev_err(dev, "test_eq failed: %d\n", err);
		goto out;
	}

	err = -EPROTO;

	if (resp.hdr.status) {
		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
		goto out;
	}

	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
		goto out;
	}

	if (eq->id != gc->test_event_eq_id) {
		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
			gc->test_event_eq_id, eq->id);
		goto out;
	}

	err = 0;
out:
	mutex_unlock(&gc->eq_test_event_mutex);
	return err;
}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
			       struct gdma_queue *queue)
{
	int err;

	if (flush_events) {
		err = mana_gd_test_eq(gc, queue);
		if (err)
			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
	}

	mana_gd_deregister_irq(queue);

	if (mana_gd_is_mana(queue->gdma_dev)) {
		napi_disable(&queue->eq.napi);
		netif_napi_del(&queue->eq.napi);
	}

	if (queue->eq.disable_needed)
		mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     bool create_hwq, struct gdma_queue *queue)
{
	struct gdma_context *gc = gd->gdma_context;
	struct device *dev = gc->dev;
	u32 log2_num_entries;
	int err;

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

	if (spec->eq.log2_throttle_limit > log2_num_entries) {
		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
			spec->eq.log2_throttle_limit, log2_num_entries);
		return -EINVAL;
	}

	err = mana_gd_register_irq(queue, spec);
	if (err) {
		dev_err(dev, "Failed to register irq: %d\n", err);
		return err;
	}

	queue->eq.callback = spec->eq.callback;
	queue->eq.context = spec->eq.context;
	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

	if (create_hwq) {
		err = mana_gd_create_hw_eq(gc, queue);
		if (err)
			goto out;

		err = mana_gd_test_eq(gc, queue);
		if (err)
			goto out;
	}

	return 0;
out:
	dev_err(dev, "Failed to create EQ: %d\n", err);
	mana_gd_destroy_eq(gc, false, queue);
	return err;
}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
			      struct gdma_queue *queue)
{
	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->cq.parent = spec->cq.parent_eq;
	queue->cq.context = spec->cq.context;
	queue->cq.callback = spec->cq.callback;
}

static void mana_gd_destroy_cq(struct gdma_context *gc,
			       struct gdma_queue *queue)
{
	u32 id = queue->id;

	if (id >= gc->max_num_cqs)
		return;

	if (!gc->cq_table[id])
		return;

	gc->cq_table[id] = NULL;
}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_EQ)
		err = mana_gd_create_eq(gd, spec, false, queue);
	else if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (gdma_region == GDMA_INVALID_DMA_REGION)
		return;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
			     sizeof(resp));
	req.gdma_region = gdma_region;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
			err, resp.hdr.status);
}
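
/*
 * Register the queue memory with the device as a DMA region. The request
 * message is variable-sized: a gdma_create_dma_region_req header followed
 * by one u64 DMA address per PAGE_SIZE page, so the whole message must fit
 * within the HWC's maximum request size.
 */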
static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
{
	unsigned int num_page = gmi->length / PAGE_SIZE;
	struct gdma_create_dma_region_req *req = NULL;
	struct gdma_create_dma_region_resp resp = {};
	struct gdma_context *gc = gd->gdma_context;
	struct hw_channel_context *hwc;
	u32 length = gmi->length;
	u32 req_msg_size;
	int err;
	int i;

	if (length < PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	if (offset_in_page(gmi->virt_addr) != 0)
		return -EINVAL;

	hwc = gc->hwc.driver_data;
	req_msg_size = sizeof(*req) + num_page * sizeof(u64);
	if (req_msg_size > hwc->max_req_msg_size)
		return -EINVAL;

	req = kzalloc(req_msg_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
			     req_msg_size, sizeof(resp));
	req->length = length;
	req->offset_in_page = 0;
	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
	req->page_count = num_page;
	req->page_addr_list_len = num_page;

	for (i = 0; i < num_page; i++)
		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;

	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
	if (err)
		goto out;

	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
			resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	gmi->gdma_region = resp.gdma_region;
out:
	kfree(req);
	return err;
}

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_EQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	err = mana_gd_create_eq(gd, spec, true, queue);
	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
	    spec->type != GDMA_RQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
	struct gdma_mem_info *gmi = &queue->mem_info;

	switch (queue->type) {
	case GDMA_EQ:
		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
		break;

	case GDMA_CQ:
		mana_gd_destroy_cq(gc, queue);
		break;

	case GDMA_RQ:
		break;

	case GDMA_SQ:
		break;

	default:
		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
			queue->type);
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
	mana_gd_free_memory(gmi);
	kfree(queue);
}

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_verify_ver_resp resp = {};
	struct gdma_verify_ver_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(req), sizeof(resp));

	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req.protocol_ver_max = GDMA_PROTOCOL_LAST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}

int mana_gd_register_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_register_device_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	gd->pdid = resp.pdid;
	gd->gpa_mkey = resp.gpa_mkey;
	gd->doorbell = resp.db_id;

	return 0;
}

int mana_gd_deregister_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_general_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	if (gd->pdid == INVALID_PDID)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
			err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	return err;
}
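
/*
 * Work queues are measured in 32-byte Basic Units (GDMA_WQE_BU_SIZE);
 * head and tail count BUs, so the space in use is (head - tail) BUs
 * converted to bytes.
 */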
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
	u32 wq_size = wq->queue_size;

	WARN_ON_ONCE(used_space > wq_size);

	return wq_size - used_space;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{
	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);

	return wq->queue_mem_ptr + offset;
}
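
/*
 * Write the WQE header and the inline client OOB data at wqe_ptr, and
 * return the number of bytes consumed. When the client OOB travels in the
 * SGL (GDMA_WR_OOB_IN_SGL), at least two SGEs are expected, and with
 * GDMA_WR_PAD_BY_SGE0 the size of SGE 0 is recorded in last_vbytes.
 */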
static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
				    enum gdma_queue_type q_type,
				    u32 client_oob_size, u32 sgl_data_size,
				    u8 *wqe_ptr)
{
	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
	u8 *ptr;

	memset(header, 0, sizeof(struct gdma_wqe));
	header->num_sge = wqe_req->num_sge;
	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);

	if (oob_in_sgl) {
		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);

		header->client_oob_in_sgl = 1;

		if (pad_data)
			header->last_vbytes = wqe_req->sgl[0].size;
	}

	if (q_type == GDMA_SQ)
		header->client_data_unit = wqe_req->client_data_unit;

	/* The size of gdma_wqe + client_oob_size must be less than or equal
	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
	 * the queue memory buffer boundary.
	 */
	ptr = wqe_ptr + sizeof(*header);

	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

		if (client_oob_size > wqe_req->inline_oob_size)
			memset(ptr + wqe_req->inline_oob_size, 0,
			       client_oob_size - wqe_req->inline_oob_size);
	}

	return sizeof(*header) + client_oob_size;
}
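
/*
 * Copy the caller's SGL into the work queue at wqe_ptr. The queue memory
 * is a ring: a copy that would run past the end of the buffer is split,
 * with the remainder wrapping around to the base of the queue.
 */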
static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
			      const struct gdma_wqe_request *wqe_req)
{
	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	const u8 *address = (u8 *)wqe_req->sgl;
	u8 *base_ptr, *end_ptr;
	u32 size_to_end;

	base_ptr = wq->queue_mem_ptr;
	end_ptr = base_ptr + wq->queue_size;
	size_to_end = (u32)(end_ptr - wqe_ptr);

	if (size_to_end < sgl_size) {
		memcpy(wqe_ptr, address, size_to_end);

		wqe_ptr = base_ptr;
		address += size_to_end;
		sgl_size -= size_to_end;
	}

	memcpy(wqe_ptr, address, sgl_size);
}
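
/*
 * Build and post one WQE: header + client OOB + SGL, rounded up to a
 * multiple of GDMA_WQE_BU_SIZE. For RQs the inline OOB size is fixed at
 * INLINE_OOB_SMALL_SIZE; for SQs the caller picks the small or large size.
 * A minimal usage sketch (illustrative only; dma_addr, len and sq are
 * placeholders, field layouts per gdma.h):
 *
 *	struct gdma_sge sge = { .address = dma_addr, .mem_key = gd->gpa_mkey,
 *				.size = len };
 *	struct gdma_wqe_request wqe_req = { .sgl = &sge, .num_sge = 1,
 *			.inline_oob_size = INLINE_OOB_SMALL_SIZE };
 *
 *	err = mana_gd_post_and_ring(sq, &wqe_req, NULL);
 */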
int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info)
{
	u32 client_oob_size = wqe_req->inline_oob_size;
	struct gdma_context *gc;
	u32 sgl_data_size;
	u32 max_wqe_size;
	u32 wqe_size;
	u8 *wqe_ptr;

	if (wqe_req->num_sge == 0)
		return -EINVAL;

	if (wq->type == GDMA_RQ) {
		if (client_oob_size != 0)
			return -EINVAL;

		client_oob_size = INLINE_OOB_SMALL_SIZE;

		max_wqe_size = GDMA_MAX_RQE_SIZE;
	} else {
		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
		    client_oob_size != INLINE_OOB_LARGE_SIZE)
			return -EINVAL;

		max_wqe_size = GDMA_MAX_SQE_SIZE;
	}

	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
			 sgl_data_size, GDMA_WQE_BU_SIZE);
	if (wqe_size > max_wqe_size)
		return -EINVAL;

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
		gc = wq->gdma_dev->gdma_context;
		dev_err(gc->dev, "WQ full: failed to post work request\n");
		return -ENOSPC;
	}

	if (wqe_info)
		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
					    sgl_data_size, wqe_ptr);
	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
		wqe_ptr -= wq->queue_size;

	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

	wq->head += wqe_size / GDMA_WQE_BU_SIZE;

	return 0;
}

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe_req,
			  struct gdma_posted_wqe_info *wqe_info)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	int err;

	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
	if (err)
		return err;

	mana_gd_wq_ring_doorbell(gc, queue);

	return 0;
}
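
/*
 * Read one CQE, using the same owner-bit scheme as the EQ: return 1 and
 * fill in @comp if a new entry is available, 0 if the queue is empty, and
 * -1 if the hardware has overflowed the queue.
 */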
static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
	u32 owner_bits, new_bits, old_bits;
	struct gdma_cqe *cqe;

	cqe = &cq_cqe[cq->head % num_cqe];
	owner_bits = cqe->cqe_info.owner_bits;

	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
	/* Return 0 if no more entries. */
	if (owner_bits == old_bits)
		return 0;

	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
	/* Return -1 if overflow detected. */
	if (owner_bits != new_bits)
		return -1;

	comp->wq_num = cqe->cqe_info.wq_num;
	comp->is_sq = cqe->cqe_info.is_sq;
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

	return 1;
}

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
	int cqe_idx;
	int ret;

	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

		if (ret < 0) {
			cq->head -= cqe_idx;
			return ret;
		}

		if (ret == 0)
			break;

		cq->head++;
	}

	return cqe_idx;
}

static irqreturn_t mana_gd_intr(int irq, void *arg)
{
	struct gdma_irq_context *gic = arg;

	if (gic->handler)
		gic->handler(gic->arg);

	return IRQ_HANDLED;
}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{
	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
	if (!r->map)
		return -ENOMEM;

	r->size = res_avail;
	spin_lock_init(&r->lock);

	return 0;
}

void mana_gd_free_res_map(struct gdma_resource *r)
{
	bitmap_free(r->map);
	r->map = NULL;
	r->size = 0;
}

static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
	unsigned int max_queues_per_port = num_online_cpus();
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	unsigned int max_irqs;
	int nvec, irq;
	int err, i, j;

	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
		max_queues_per_port = MANA_MAX_NUM_QUEUES;

	max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;
	/* Need 1 interrupt for the Hardware Communication Channel (HWC) */
	max_irqs++;

	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
				   GFP_KERNEL);
	if (!gc->irq_contexts) {
		err = -ENOMEM;
		goto free_irq_vector;
	}

	for (i = 0; i < nvec; i++) {
		gic = &gc->irq_contexts[i];
		gic->handler = NULL;
		gic->arg = NULL;

		irq = pci_irq_vector(pdev, i);
		if (irq < 0) {
			err = irq;
			goto free_irq;
		}

		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
		if (err)
			goto free_irq;
	}

	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
	if (err)
		goto free_irq;

	gc->max_num_msix = nvec;
	gc->num_msix_usable = nvec;

	return 0;

free_irq:
	for (j = i - 1; j >= 0; j--) {
		irq = pci_irq_vector(pdev, j);
		gic = &gc->irq_contexts[j];
		free_irq(irq, gic);
	}

	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
free_irq_vector:
	pci_free_irq_vectors(pdev);
	return err;
}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	int irq, i;

	if (gc->max_num_msix < 1)
		return;

	mana_gd_free_res_map(&gc->msix_resource);

	for (i = 0; i < gc->max_num_msix; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0)
			continue;

		gic = &gc->irq_contexts[i];
		free_irq(irq, gic);
	}

	pci_free_irq_vectors(pdev);

	gc->max_num_msix = 0;
	gc->num_msix_usable = 0;
	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct gdma_context *gc;
	void __iomem *bar0_va;
	int bar = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return -ENXIO;

	pci_set_master(pdev);

	err = pci_request_regions(pdev, "mana");
	if (err)
		goto disable_dev;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		goto release_region;

	err = -ENOMEM;
	gc = vzalloc(sizeof(*gc));
	if (!gc)
		goto release_region;

	bar0_va = pci_iomap(pdev, bar, 0);
	if (!bar0_va)
		goto free_gc;

	gc->bar0_va = bar0_va;
	gc->dev = &pdev->dev;

	pci_set_drvdata(pdev, gc);

	mana_gd_init_registers(pdev);

	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

	err = mana_gd_setup_irqs(pdev);
	if (err)
		goto unmap_bar;

	mutex_init(&gc->eq_test_event_mutex);

	err = mana_hwc_create_channel(gc);
	if (err)
		goto remove_irq;

	err = mana_gd_verify_vf_version(pdev);
	if (err)
		goto remove_irq;

	err = mana_gd_query_max_resources(pdev);
	if (err)
		goto remove_irq;

	err = mana_gd_detect_devices(pdev);
	if (err)
		goto remove_irq;

	err = mana_probe(&gc->mana);
	if (err)
		goto clean_up_gdma;

	return 0;

clean_up_gdma:
	mana_hwc_destroy_channel(gc);
	vfree(gc->cq_table);
	gc->cq_table = NULL;
remove_irq:
	mana_gd_remove_irqs(pdev);
unmap_bar:
	pci_iounmap(pdev, bar0_va);
free_gc:
	vfree(gc);
release_region:
	pci_release_regions(pdev);
disable_dev:
	pci_clear_master(pdev);
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
	return err;
}

static void mana_gd_remove(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_remove(&gc->mana);

	mana_hwc_destroy_channel(gc);
	vfree(gc->cq_table);
	gc->cq_table = NULL;

	mana_gd_remove_irqs(pdev);

	pci_iounmap(pdev, gc->bar0_va);

	vfree(gc);

	pci_release_regions(pdev);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif

static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
	{ }
};

static struct pci_driver mana_driver = {
	.name		= "mana",
	.id_table	= mana_id_table,
	.probe		= mana_gd_probe,
	.remove		= mana_gd_remove,
};

module_pci_driver(mana_driver);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");