/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"

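/*
 * Map a guest multi-page buffer, described by a page directory, to a
 * contiguous host virtual address range. pdir_dma is the GPA of a page
 * holding GPAs of page tables; each page table holds GPAs of the data
 * pages (nchunks in total). The pages are stitched together with
 * mremap(MREMAP_FIXED) into a single mapping of 'length' bytes, which
 * the caller must eventually munmap().
 */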
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        pr_dbg("nchunks=0\n");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        error_report("PVRDMA: Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        error_report("PVRDMA: Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        error_report("PVRDMA: Failed to map the first page");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    pr_dbg("mremap %p -> %p\n", curr_page, host_virt);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        error_report("PVRDMA: Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    pr_dbg("host_virt=%p\n", host_virt);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            pr_dbg("Mapping to table %d\n", dir_idx);
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                error_report("PVRDMA: Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
                         dir_idx);
            goto out_unmap_host_virt;
        }

        /* Fail hard if the page could not be stitched into the mapping */
        if (mremap(curr_page, 0, TARGET_PAGE_SIZE,
                   MREMAP_MAYMOVE | MREMAP_FIXED,
                   host_virt + TARGET_PAGE_SIZE * addr_idx) == MAP_FAILED) {
            error_report("PVRDMA: Failed to remap page %d", addr_idx);
            rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
            goto out_unmap_host_virt;
        }

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;
        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}

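/*
 * Note: until the guest activates PCI function 0 (the paired vmxnet3
 * device), the port state is reported as PVRDMA_PORT_DOWN.
 */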
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {0};

    pr_dbg("port=%d\n", cmd->port_num);
    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));

    resp->attrs.state = dev->func0->device_active ? attrs.state :
                                                    PVRDMA_PORT_DOWN;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}

static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    pr_dbg("port=%d\n", cmd->port_num);
    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    pr_dbg("index=%d\n", cmd->index);
    if (cmd->index > MAX_PKEYS) {
        return -EINVAL;
    }

    memset(resp, 0, sizeof(*resp));

    resp->pkey = PVRDMA_PKEY;
    pr_dbg("pkey=0x%x\n", resp->pkey);

    return 0;
}

static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
    int rc;

    pr_dbg("context=0x%x\n", cmd->ctx_handle);

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                          &resp->pd_handle, cmd->ctx_handle);

    return rc;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}

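/*
 * Create a memory region. Unless the guest requested a DMA MR, the
 * scattered guest pages are first remapped into one contiguous host
 * virtual range so that a single backend MR can be registered over it.
 */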
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;
    int rc = 0;

    memset(resp, 0, sizeof(*resp));

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);
    pr_dbg("access_flags=0x%x\n", cmd->access_flags);
    pr_dbg("flags=0x%x\n", cmd->flags);

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            pr_dbg("Failed to map to pdir\n");
            return -EINVAL;
        }
    }

    rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
                          cmd->length, host_virt, cmd->access_flags,
                          &resp->mr_handle, &resp->lkey, &resp->rkey);
    if (rc && host_virt) {
        munmap(host_virt, cmd->length);
    }

    return rc;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    pr_dbg("mr_handle=%d\n", cmd->mr_handle);

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}

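/*
 * CQ ring layout in guest memory: tbl[0] is the shared ring-state page
 * (producer/consumer indices), tbl[1..nchunks-1] hold the CQEs.
 */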
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        pr_dbg("invalid nchunks: %d\n", nchunks);
        return rc;
    }

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
                        cmd->cqe);
    if (rc) {
        return rc;
    }

    pr_dbg("ring=%p\n", ring);

    rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
                          &resp->cq_handle, ring);
    if (rc) {
        destroy_cq_ring(ring);
    }

    resp->cqe = cmd->cqe;

    return rc;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    pr_dbg("cq_handle=%d\n", cmd->cq_handle);

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        pr_dbg("Invalid CQ handle\n");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}

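/*
 * QP ring layout in guest memory: tbl[0] is a shared ring-state page
 * (send state at index 0, recv state at index 1), tbl[1..spages] hold
 * the send ring and tbl[1 + spages..] the recv ring. The two PvrdmaRing
 * structs are allocated as one array so destroy_qp_rings() can free
 * them together.
 */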
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES
        || !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) {
        pr_dbg("invalid pages: %d, %d\n", spages, rpages);
        return rc;
    }

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to QP page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to QP page table\n");
        goto out;
    }

    sr = g_malloc(2 * sizeof(*rr));
    rr = &sr[1];
    pr_dbg("sring=%p\n", sr);
    pr_dbg("rring=%p\n", rr);

    *rings = sr;

    pr_dbg("scqe=%d\n", scqe);
    pr_dbg("smax_sge=%d\n", smax_sge);
    pr_dbg("spages=%d\n", spages);
    pr_dbg("rcqe=%d\n", rcqe);
    pr_dbg("rmax_sge=%d\n", rmax_sge);
    pr_dbg("rpages=%d\n", rpages);

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        pr_dbg("Failed to map to QP ring state\n");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    /* Create recv ring */
    rr->ring_state = &sr->ring_state[1];
    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
    if (rc) {
        goto out_free_sr;
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring)
{
    pr_dbg("sring=%p\n", &ring[0]);
    pvrdma_ring_free(&ring[0]);
    pr_dbg("rring=%p\n", &ring[1]);
    pvrdma_ring_free(&ring[1]);

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

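/*
 * The guest's page directory covers both rings: one shared ring-state
 * page, send_chunks pages for the send ring and the remaining
 * total_chunks - send_chunks - 1 pages for the recv ring.
 */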
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    pr_dbg("total_chunks=%d\n", cmd->total_chunks);
    pr_dbg("send_chunks=%d\n", cmd->send_chunks);

    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1);
    if (rc) {
        return rc;
    }

    pr_dbg("rings=%p\n", rings);

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn);
    if (rc) {
        destroy_qp_rings(rings);
        return rc;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}

static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
    int rc;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);

    memset(rsp, 0, sizeof(*rsp));

    /* No need to verify sgid_index since it is u8 */

    rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                           cmd->qp_handle, cmd->attr_mask,
                           cmd->attrs.ah_attr.grh.sgid_index,
                           (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                           cmd->attrs.dest_qp_num,
                           (enum ibv_qp_state)cmd->attrs.qp_state,
                           cmd->attrs.qkey, cmd->attrs.rq_psn,
                           cmd->attrs.sq_psn);

    return rc;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;
    int rc;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);
    pr_dbg("attr_mask=0x%x\n", cmd->attr_mask);

    memset(rsp, 0, sizeof(*rsp));

    rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
                          (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
                          &init_attr);

    return rc;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        pr_dbg("Invalid QP handle\n");
        return -EINVAL;
    }

    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring);

    return 0;
}

static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    int rc;
    union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index,
           (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix),
           (long long unsigned int)be64_to_cpu(gid->global.interface_id));

    rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, gid, cmd->index);

    return rc;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
    int rc;

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, cmd->index);

    return rc;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
    int rc;

    pr_dbg("pfn=%d\n", cmd->pfn);

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);

    return rc;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    pr_dbg("ctx_handle=%d\n", cmd->ctx_handle);

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}

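/*
 * Each entry pairs a request opcode with the ack code reported back to
 * the guest and the handler that executes it; the request opcode also
 * serves as the index into this table.
 */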
struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT,   PVRDMA_CMD_QUERY_PORT_RESP,        query_port},
    {PVRDMA_CMD_QUERY_PKEY,   PVRDMA_CMD_QUERY_PKEY_RESP,        query_pkey},
    {PVRDMA_CMD_CREATE_PD,    PVRDMA_CMD_CREATE_PD_RESP,         create_pd},
    {PVRDMA_CMD_DESTROY_PD,   PVRDMA_CMD_DESTROY_PD_RESP_NOOP,   destroy_pd},
    {PVRDMA_CMD_CREATE_MR,    PVRDMA_CMD_CREATE_MR_RESP,         create_mr},
    {PVRDMA_CMD_DESTROY_MR,   PVRDMA_CMD_DESTROY_MR_RESP_NOOP,   destroy_mr},
    {PVRDMA_CMD_CREATE_CQ,    PVRDMA_CMD_CREATE_CQ_RESP,         create_cq},
    {PVRDMA_CMD_RESIZE_CQ,    PVRDMA_CMD_RESIZE_CQ_RESP,         NULL},
    {PVRDMA_CMD_DESTROY_CQ,   PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,   destroy_cq},
    {PVRDMA_CMD_CREATE_QP,    PVRDMA_CMD_CREATE_QP_RESP,         create_qp},
    {PVRDMA_CMD_MODIFY_QP,    PVRDMA_CMD_MODIFY_QP_RESP,         modify_qp},
    {PVRDMA_CMD_QUERY_QP,     PVRDMA_CMD_QUERY_QP_RESP,          query_qp},
    {PVRDMA_CMD_DESTROY_QP,   PVRDMA_CMD_DESTROY_QP_RESP,        destroy_qp},
    {PVRDMA_CMD_CREATE_UC,    PVRDMA_CMD_CREATE_UC_RESP,         create_uc},
    {PVRDMA_CMD_DESTROY_UC,   PVRDMA_CMD_DESTROY_UC_RESP_NOOP,   destroy_uc},
    {PVRDMA_CMD_CREATE_BIND,  PVRDMA_CMD_CREATE_BIND_RESP_NOOP,  create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
};

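/*
 * Dispatch the command posted in the device-shared region to its
 * handler, then publish the result through the error register and the
 * command-ring interrupt.
 */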
int execute_command(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd);
    if (dsr_info->req->hdr.cmd >= ARRAY_SIZE(cmd_handlers)) {
        pr_dbg("Unsupported command\n");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        pr_dbg("Unsupported command (not implemented yet)\n");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;
    pr_dbg("rsp->hdr.err=%d\n", dsr_info->rsp->hdr.err);

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}