/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "qemu/cutils.h"

#include "trace.h"

#include "../rdma_utils.h"
#include "pvrdma_dev_ring.h"

/*
 * Initialize a device ring: record its geometry and DMA-map the guest
 * pages that back the ring buffer.
 *
 * @ring:       ring object to initialize
 * @name:       human-readable ring name, copied (truncated) into @ring
 * @dev:        PCI device used for the DMA mappings
 * @ring_state: shared producer/consumer state (prod_tail / cons_head)
 * @max_elems:  number of elements the ring can hold
 * @elem_sz:    size of a single element, in bytes
 * @tbl:        guest physical addresses of the ring pages
 * @npages:     number of entries in @tbl
 *
 * Returns 0 on success, negative errno on failure.  On failure every page
 * mapped so far is unmapped and the page table is released; the ring is
 * left in a state that is safe to pass to pvrdma_ring_free().
 */
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     PvrdmaRingState *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    /* TODO: Give a moment to think if we want to redo driver settings
    qatomic_set(&ring->ring_state->prod_tail, 0);
    qatomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    ring->pages = g_new0(void *, npages);

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            /* Skip the hole; the slot stays NULL in the zeroed table. */
            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            rdma_error_report("Failed to map to page %d in ring %s", i, name);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);
    /*
     * Reset the page bookkeeping so a subsequent pvrdma_ring_free() on the
     * failed ring does not unmap or free the stale pointers again.
     */
    ring->pages = NULL;
    ring->npages = 0;

out:
    return rc;
}
/*
 * Peek at the next element available for reading (consumer side).
 *
 * Returns a pointer into the mapped ring page holding the element at the
 * consumer head, or NULL when the ring is empty or the shared indices are
 * out of range.
 */
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);
    const uint32_t wrap_mask = (ring->max_elems << 1) - 1;
    unsigned int e_idx, e_off;

    /* Indices live in [0, 2 * max_elems); equal indices mean empty. */
    if ((tail & ~wrap_mask) || (head & ~wrap_mask) || tail == head) {
        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
        return NULL;
    }

    e_idx = head & (ring->max_elems - 1);
    e_off = e_idx * ring->elem_sz;
    return ring->pages[e_off / TARGET_PAGE_SIZE] + (e_off % TARGET_PAGE_SIZE);
}
/*
 * Advance the consumer head after the element returned by
 * pvrdma_ring_next_elem_read() has been consumed.  The index wraps in
 * [0, 2 * max_elems).
 */
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    const uint32_t wrap_mask = (ring->max_elems << 1) - 1;
    uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    head = (head + 1) & wrap_mask;
    qatomic_set(&ring->ring_state->cons_head, head);
}
/*
 * Peek at the next free slot for writing (producer side).
 *
 * Returns a pointer into the mapped ring page for the element at the
 * producer tail, or NULL when the ring is full or the shared indices are
 * out of range.
 */
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    unsigned int idx, offset;
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    /*
     * Indices live in [0, 2 * max_elems); tail == head ^ max_elems means
     * the producer has lapped the consumer, i.e. the ring is full.
     */
    if (tail & ~((ring->max_elems << 1) - 1) ||
        head & ~((ring->max_elems << 1) - 1) ||
        tail == (head ^ ring->max_elems)) {
        /* Identify the ring by name; this helper is not CQ-specific. */
        rdma_error_report("Ring %s is full", ring->name);
        return NULL;
    }

    idx = tail & (ring->max_elems - 1);
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
/*
 * Advance the producer tail after the slot returned by
 * pvrdma_ring_next_elem_write() has been filled.  The index wraps in
 * [0, 2 * max_elems).
 */
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    const uint32_t wrap_mask = (ring->max_elems << 1) - 1;
    uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);

    tail = (tail + 1) & wrap_mask;
    qatomic_set(&ring->ring_state->prod_tail, tail);
}
/*
 * Release a ring: unmap every guest page and free the page table.
 * Safe to call with NULL or with a ring whose pages were already
 * released (ring->pages is NULLed to make repeated calls a no-op).
 */
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring || !ring->pages) {
        return;
    }

    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}