virtio_ring.c: diff between fb3fba6b162aaa42aeba6e9034f3e92716c2a749 and 1ce9e6055fa0a9043405c5604cf19169ec5379ff
1/* Virtio ring implementation.
2 *
3 * Copyright 2007 Rusty Russell IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.

--- 69 unchanged lines hidden ---

78#define LAST_ADD_TIME_INVALID(vq)
79#endif
80
81struct vring_desc_state_split {
82 void *data; /* Data for callback. */
83 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
84};
85
86struct vring_desc_state_packed {
87 void *data; /* Data for callback. */
88 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
89 u16 num; /* Descriptor list length. */
90 u16 next; /* The next desc state in a list. */
91 u16 last; /* The last desc state in a list. */
92};
93
94struct vring_desc_extra_packed {
95 dma_addr_t addr; /* Buffer DMA addr. */
96 u32 len; /* Buffer length. */
97 u16 flags; /* Descriptor flags. */
98};
99
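/*
 * Rough picture of how the two per-descriptor arrays above are used by the
 * packed-ring code below: vring_desc_state_packed carries the driver-side
 * bookkeeping (the opaque "data" token, the length of the posted descriptor
 * chain and the free-list links), while vring_desc_extra_packed only mirrors
 * addr/len/flags so buffers can be DMA-unmapped later; it is filled in
 * solely when use_dma_api is set.
 */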
100struct vring_virtqueue {
101 struct virtqueue vq;
102
103 /* Is this a packed ring? */
104 bool packed_ring;
105
106 /* Is DMA API used? */
107 bool use_dma_api;
108
109 /* Can we use weak barriers? */
110 bool weak_barriers;
111
112 /* Other side has made a mess, don't try any more. */
113 bool broken;

--- 7 unchanged lines hidden ---

121 /* Head of free buffer list. */
122 unsigned int free_head;
123 /* Number we've added since last sync. */
124 unsigned int num_added;
125
126 /* Last used index we've seen. */
127 u16 last_used_idx;
128
112 struct {
113 /* Actual memory layout for this queue */
114 struct vring vring;
129 union {
130 /* Available for split ring */
131 struct {
132 /* Actual memory layout for this queue. */
133 struct vring vring;
134
135 /* Last written value to avail->flags */
136 u16 avail_flags_shadow;
137
119 /* Last written value to avail->idx in guest byte order */
120 u16 avail_idx_shadow;
138 /*
139 * Last written value to avail->idx in
140 * guest byte order.
141 */
142 u16 avail_idx_shadow;
143
144 /* Per-descriptor state. */
145 struct vring_desc_state_split *desc_state;
146
125 /* DMA, allocation, and size information */
126 size_t queue_size_in_bytes;
127 dma_addr_t queue_dma_addr;
128 } split;
147 /* DMA address and size information */
148 dma_addr_t queue_dma_addr;
149 size_t queue_size_in_bytes;
150 } split;
151
152 /* Available for packed ring */
153 struct {
154 /* Actual memory layout for this queue. */
155 struct vring_packed vring;
156
157 /* Driver ring wrap counter. */
158 bool avail_wrap_counter;
159
160 /* Device ring wrap counter. */
161 bool used_wrap_counter;
162
163 /* Avail used flags. */
164 u16 avail_used_flags;
165
166 /* Index of the next avail descriptor. */
167 u16 next_avail_idx;
168
169 /*
170 * Last written value to driver->flags in
171 * guest byte order.
172 */
173 u16 event_flags_shadow;
174
175 /* Per-descriptor state. */
176 struct vring_desc_state_packed *desc_state;
177 struct vring_desc_extra_packed *desc_extra;
178
179 /* DMA address and size information */
180 dma_addr_t ring_dma_addr;
181 dma_addr_t driver_event_dma_addr;
182 dma_addr_t device_event_dma_addr;
183 size_t ring_size_in_bytes;
184 size_t event_size_in_bytes;
185 } packed;
186 };
187
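/*
 * The split/packed members above form a union: a given virtqueue uses exactly
 * one of the two layouts, chosen once at creation time and recorded in
 * packed_ring, so the split and packed state never coexist for the same queue.
 */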
188 /* How to notify other side. FIXME: commonalize hcalls! */
189 bool (*notify)(struct virtqueue *vq);
190
191 /* DMA, allocation, and size information */
192 bool we_own_ring;
193
194#ifdef DEBUG
195 /* They're supposed to lock for us. */

--- 698 unchanged lines hidden ---

894 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
895 to_vvq(vq)->we_own_ring = true;
896
897 return vq;
898}
899
900
901/*
902 * Packed ring specific functions - *_packed().
903 */
904
905static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
906 struct vring_desc_extra_packed *state)
907{
908 u16 flags;
909
910 if (!vq->use_dma_api)
911 return;
912
913 flags = state->flags;
914
915 if (flags & VRING_DESC_F_INDIRECT) {
916 dma_unmap_single(vring_dma_dev(vq),
917 state->addr, state->len,
918 (flags & VRING_DESC_F_WRITE) ?
919 DMA_FROM_DEVICE : DMA_TO_DEVICE);
920 } else {
921 dma_unmap_page(vring_dma_dev(vq),
922 state->addr, state->len,
923 (flags & VRING_DESC_F_WRITE) ?
924 DMA_FROM_DEVICE : DMA_TO_DEVICE);
925 }
926}
927
928static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
929 struct vring_packed_desc *desc)
930{
931 u16 flags;
932
933 if (!vq->use_dma_api)
934 return;
935
936 flags = le16_to_cpu(desc->flags);
937
938 if (flags & VRING_DESC_F_INDIRECT) {
939 dma_unmap_single(vring_dma_dev(vq),
940 le64_to_cpu(desc->addr),
941 le32_to_cpu(desc->len),
942 (flags & VRING_DESC_F_WRITE) ?
943 DMA_FROM_DEVICE : DMA_TO_DEVICE);
944 } else {
945 dma_unmap_page(vring_dma_dev(vq),
946 le64_to_cpu(desc->addr),
947 le32_to_cpu(desc->len),
948 (flags & VRING_DESC_F_WRITE) ?
949 DMA_FROM_DEVICE : DMA_TO_DEVICE);
950 }
951}
952
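/*
 * Note on the two unmap helpers above: vring_unmap_state_packed() works from
 * the desc_extra shadow (used for buffers linked directly into the ring),
 * while vring_unmap_desc_packed() reads addr/len/flags back out of a
 * descriptor itself, which is what the indirect-table and error-unwind paths
 * below rely on. In both cases an INDIRECT descriptor is unmapped with
 * dma_unmap_single(), because the table was mapped as one kmalloc'ed buffer,
 * and ordinary entries with dma_unmap_page().
 */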
953static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
954 gfp_t gfp)
955{
956 struct vring_packed_desc *desc;
957
958 /*
959 * We require lowmem mappings for the descriptors because
960 * otherwise virt_to_phys will give us bogus addresses in the
961 * virtqueue.
962 */
963 gfp &= ~__GFP_HIGHMEM;
964
965 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
966
967 return desc;
968}
969
970static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
971 struct scatterlist *sgs[],
972 unsigned int total_sg,
973 unsigned int out_sgs,
974 unsigned int in_sgs,
975 void *data,
976 gfp_t gfp)
977{
978 struct vring_packed_desc *desc;
979 struct scatterlist *sg;
980 unsigned int i, n, err_idx;
981 u16 head, id;
982 dma_addr_t addr;
983
984 head = vq->packed.next_avail_idx;
985 desc = alloc_indirect_packed(total_sg, gfp);
986
987 if (unlikely(vq->vq.num_free < 1)) {
988 pr_debug("Can't add buf len 1 - avail = 0\n");
989 END_USE(vq);
990 return -ENOSPC;
991 }
992
993 i = 0;
994 id = vq->free_head;
995 BUG_ON(id == vq->packed.vring.num);
996
997 for (n = 0; n < out_sgs + in_sgs; n++) {
998 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
999 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1000 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1001 if (vring_mapping_error(vq, addr))
1002 goto unmap_release;
1003
1004 desc[i].flags = cpu_to_le16(n < out_sgs ?
1005 0 : VRING_DESC_F_WRITE);
1006 desc[i].addr = cpu_to_le64(addr);
1007 desc[i].len = cpu_to_le32(sg->length);
1008 i++;
1009 }
1010 }
1011
1012 /* Now that the indirect table is filled in, map it. */
1013 addr = vring_map_single(vq, desc,
1014 total_sg * sizeof(struct vring_packed_desc),
1015 DMA_TO_DEVICE);
1016 if (vring_mapping_error(vq, addr))
1017 goto unmap_release;
1018
1019 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1020 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1021 sizeof(struct vring_packed_desc));
1022 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1023
1024 if (vq->use_dma_api) {
1025 vq->packed.desc_extra[id].addr = addr;
1026 vq->packed.desc_extra[id].len = total_sg *
1027 sizeof(struct vring_packed_desc);
1028 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1029 vq->packed.avail_used_flags;
1030 }
1031
1032 /*
1033 * A driver MUST NOT make the first descriptor in the list
1034 * available before all subsequent descriptors comprising
1035 * the list are made available.
1036 */
1037 virtio_wmb(vq->weak_barriers);
1038 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1039 vq->packed.avail_used_flags);
1040
1041 /* We're using some buffers from the free list. */
1042 vq->vq.num_free -= 1;
1043
1044 /* Update free pointer */
1045 n = head + 1;
1046 if (n >= vq->packed.vring.num) {
1047 n = 0;
1048 vq->packed.avail_wrap_counter ^= 1;
1049 vq->packed.avail_used_flags ^=
1050 1 << VRING_PACKED_DESC_F_AVAIL |
1051 1 << VRING_PACKED_DESC_F_USED;
1052 }
1053 vq->packed.next_avail_idx = n;
1054 vq->free_head = vq->packed.desc_state[id].next;
1055
1056 /* Store token and indirect buffer state. */
1057 vq->packed.desc_state[id].num = 1;
1058 vq->packed.desc_state[id].data = data;
1059 vq->packed.desc_state[id].indir_desc = desc;
1060 vq->packed.desc_state[id].last = id;
1061
1062 vq->num_added += 1;
1063
1064 pr_debug("Added buffer head %i to %p\n", head, vq);
1065 END_USE(vq);
1066
1067 return 0;
1068
1069unmap_release:
1070 err_idx = i;
1071
1072 for (i = 0; i < err_idx; i++)
1073 vring_unmap_desc_packed(vq, &desc[i]);
1074
1075 kfree(desc);
1076
1077 END_USE(vq);
1078 return -EIO;
1079}
1080
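/*
 * Sketch of what virtqueue_add_indirect_packed() leaves behind, as visible
 * above: a single ring slot (head) flagged VRING_DESC_F_INDIRECT points at a
 * kmalloc'ed table of total_sg vring_packed_desc entries, so only one
 * id/free-list entry and one num_free slot are consumed no matter how many
 * scatterlist elements the buffer has.
 */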
1081static inline int virtqueue_add_packed(struct virtqueue *_vq,
1082 struct scatterlist *sgs[],
1083 unsigned int total_sg,
1084 unsigned int out_sgs,
1085 unsigned int in_sgs,
1086 void *data,
1087 void *ctx,
1088 gfp_t gfp)
1089{
1090 struct vring_virtqueue *vq = to_vvq(_vq);
1091 struct vring_packed_desc *desc;
1092 struct scatterlist *sg;
1093 unsigned int i, n, c, descs_used, err_idx;
1094 __le16 uninitialized_var(head_flags), flags;
1095 u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
1096
1097 START_USE(vq);
1098
1099 BUG_ON(data == NULL);
1100 BUG_ON(ctx && vq->indirect);
1101
1102 if (unlikely(vq->broken)) {
1103 END_USE(vq);
1104 return -EIO;
1105 }
1106
1107 LAST_ADD_TIME_UPDATE(vq);
1108
1109 BUG_ON(total_sg == 0);
1110
1111 if (virtqueue_use_indirect(_vq, total_sg))
1112 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
1113 out_sgs, in_sgs, data, gfp);
1114
1115 head = vq->packed.next_avail_idx;
1116 avail_used_flags = vq->packed.avail_used_flags;
1117
1118 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1119
1120 desc = vq->packed.vring.desc;
1121 i = head;
1122 descs_used = total_sg;
1123
1124 if (unlikely(vq->vq.num_free < descs_used)) {
1125 pr_debug("Can't add buf len %i - avail = %i\n",
1126 descs_used, vq->vq.num_free);
1127 END_USE(vq);
1128 return -ENOSPC;
1129 }
1130
1131 id = vq->free_head;
1132 BUG_ON(id == vq->packed.vring.num);
1133
1134 curr = id;
1135 c = 0;
1136 for (n = 0; n < out_sgs + in_sgs; n++) {
1137 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1138 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1139 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1140 if (vring_mapping_error(vq, addr))
1141 goto unmap_release;
1142
1143 flags = cpu_to_le16(vq->packed.avail_used_flags |
1144 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1145 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1146 if (i == head)
1147 head_flags = flags;
1148 else
1149 desc[i].flags = flags;
1150
1151 desc[i].addr = cpu_to_le64(addr);
1152 desc[i].len = cpu_to_le32(sg->length);
1153 desc[i].id = cpu_to_le16(id);
1154
1155 if (unlikely(vq->use_dma_api)) {
1156 vq->packed.desc_extra[curr].addr = addr;
1157 vq->packed.desc_extra[curr].len = sg->length;
1158 vq->packed.desc_extra[curr].flags =
1159 le16_to_cpu(flags);
1160 }
1161 prev = curr;
1162 curr = vq->packed.desc_state[curr].next;
1163
1164 if ((unlikely(++i >= vq->packed.vring.num))) {
1165 i = 0;
1166 vq->packed.avail_used_flags ^=
1167 1 << VRING_PACKED_DESC_F_AVAIL |
1168 1 << VRING_PACKED_DESC_F_USED;
1169 }
1170 }
1171 }
1172
1173 if (i < head)
1174 vq->packed.avail_wrap_counter ^= 1;
1175
1176 /* We're using some buffers from the free list. */
1177 vq->vq.num_free -= descs_used;
1178
1179 /* Update free pointer */
1180 vq->packed.next_avail_idx = i;
1181 vq->free_head = curr;
1182
1183 /* Store token. */
1184 vq->packed.desc_state[id].num = descs_used;
1185 vq->packed.desc_state[id].data = data;
1186 vq->packed.desc_state[id].indir_desc = ctx;
1187 vq->packed.desc_state[id].last = prev;
1188
1189 /*
1190 * A driver MUST NOT make the first descriptor in the list
1191 * available before all subsequent descriptors comprising
1192 * the list are made available.
1193 */
1194 virtio_wmb(vq->weak_barriers);
1195 vq->packed.vring.desc[head].flags = head_flags;
1196 vq->num_added += descs_used;
1197
1198 pr_debug("Added buffer head %i to %p\n", head, vq);
1199 END_USE(vq);
1200
1201 return 0;
1202
1203unmap_release:
1204 err_idx = i;
1205 i = head;
1206
1207 vq->packed.avail_used_flags = avail_used_flags;
1208
1209 for (n = 0; n < total_sg; n++) {
1210 if (i == err_idx)
1211 break;
1212 vring_unmap_desc_packed(vq, &desc[i]);
1213 i++;
1214 if (i >= vq->packed.vring.num)
1215 i = 0;
1216 }
1217
1218 END_USE(vq);
1219 return -EIO;
1220}
1221
1222static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1223{
1224 struct vring_virtqueue *vq = to_vvq(_vq);
1225 u16 flags;
1226 bool needs_kick;
1227 union {
1228 struct {
1229 __le16 off_wrap;
1230 __le16 flags;
1231 };
1232 u32 u32;
1233 } snapshot;
1234
1235 START_USE(vq);
1236
1237 /*
1238 * We need to expose the new flags value before checking notification
1239 * suppressions.
1240 */
1241 virtio_mb(vq->weak_barriers);
1242
1243 vq->num_added = 0;
1244
1245 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1246 flags = le16_to_cpu(snapshot.flags);
1247
1248 LAST_ADD_TIME_CHECK(vq);
1249 LAST_ADD_TIME_INVALID(vq);
1250
1251 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1252 END_USE(vq);
1253 return needs_kick;
1254}
1255
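/*
 * The snapshot union in virtqueue_kick_prepare_packed() pulls the device
 * event area's off_wrap and flags fields in with a single 32-bit load,
 * apparently so the pair is read consistently after the virtio_mb(); only
 * the flags half is consulted here, and the kick is skipped when the device
 * has published VRING_PACKED_EVENT_FLAG_DISABLE.
 */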
1256static void detach_buf_packed(struct vring_virtqueue *vq,
1257 unsigned int id, void **ctx)
1258{
1259 struct vring_desc_state_packed *state = NULL;
1260 struct vring_packed_desc *desc;
1261 unsigned int i, curr;
1262
1263 state = &vq->packed.desc_state[id];
1264
1265 /* Clear data ptr. */
1266 state->data = NULL;
1267
1268 vq->packed.desc_state[state->last].next = vq->free_head;
1269 vq->free_head = id;
1270 vq->vq.num_free += state->num;
1271
1272 if (unlikely(vq->use_dma_api)) {
1273 curr = id;
1274 for (i = 0; i < state->num; i++) {
1275 vring_unmap_state_packed(vq,
1276 &vq->packed.desc_extra[curr]);
1277 curr = vq->packed.desc_state[curr].next;
1278 }
1279 }
1280
1281 if (vq->indirect) {
1282 u32 len;
1283
1284 /* Free the indirect table, if any, now that it's unmapped. */
1285 desc = state->indir_desc;
1286 if (!desc)
1287 return;
1288
1289 if (vq->use_dma_api) {
1290 len = vq->packed.desc_extra[id].len;
1291 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1292 i++)
1293 vring_unmap_desc_packed(vq, &desc[i]);
1294 }
1295 kfree(desc);
1296 state->indir_desc = NULL;
1297 } else if (ctx) {
1298 *ctx = state->indir_desc;
1299 }
1300}
1301
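/*
 * detach_buf_packed() above returns the whole chain booked under "id" to the
 * free list in one step: desc_state[].next links the free entries together,
 * so splicing the released chain in at free_head and adding state->num back
 * to num_free is all the accounting that is needed.
 */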
1302static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1303 u16 idx, bool used_wrap_counter)
1304{
1305 bool avail, used;
1306 u16 flags;
1307
1308 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1309 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1310 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1311
1312 return avail == used && used == used_wrap_counter;
1313}
1314
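/*
 * Ownership rule behind is_used_desc_packed(): when the driver posts a
 * descriptor it writes AVAIL equal to its wrap counter and USED to the
 * opposite value; the device marks it consumed by making USED match AVAIL
 * again. A slot is therefore "used" once the two bits agree and both equal
 * the wrap counter the driver expects (used_wrap_counter).
 */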
1315static inline bool more_used_packed(const struct vring_virtqueue *vq)
1316{
1317 return is_used_desc_packed(vq, vq->last_used_idx,
1318 vq->packed.used_wrap_counter);
1319}
1320
1321static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1322 unsigned int *len,
1323 void **ctx)
1324{
1325 struct vring_virtqueue *vq = to_vvq(_vq);
1326 u16 last_used, id;
1327 void *ret;
1328
1329 START_USE(vq);
1330
1331 if (unlikely(vq->broken)) {
1332 END_USE(vq);
1333 return NULL;
1334 }
1335
1336 if (!more_used_packed(vq)) {
1337 pr_debug("No more buffers in queue\n");
1338 END_USE(vq);
1339 return NULL;
1340 }
1341
1342 /* Only get used elements after they have been exposed by host. */
1343 virtio_rmb(vq->weak_barriers);
1344
1345 last_used = vq->last_used_idx;
1346 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1347 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1348
1349 if (unlikely(id >= vq->packed.vring.num)) {
1350 BAD_RING(vq, "id %u out of range\n", id);
1351 return NULL;
1352 }
1353 if (unlikely(!vq->packed.desc_state[id].data)) {
1354 BAD_RING(vq, "id %u is not a head!\n", id);
1355 return NULL;
1356 }
1357
1358 /* detach_buf_packed clears data, so grab it now. */
1359 ret = vq->packed.desc_state[id].data;
1360 detach_buf_packed(vq, id, ctx);
1361
1362 vq->last_used_idx += vq->packed.desc_state[id].num;
1363 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1364 vq->last_used_idx -= vq->packed.vring.num;
1365 vq->packed.used_wrap_counter ^= 1;
1366 }
1367
1368 LAST_ADD_TIME_INVALID(vq);
1369
1370 END_USE(vq);
1371 return ret;
1372}
1373
1374static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1375{
1376 struct vring_virtqueue *vq = to_vvq(_vq);
1377
1378 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1379 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1380 vq->packed.vring.driver->flags =
1381 cpu_to_le16(vq->packed.event_flags_shadow);
1382 }
1383}
1384
1385static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1386{
1387 struct vring_virtqueue *vq = to_vvq(_vq);
1388
1389 START_USE(vq);
1390
1391 /*
1392 * We optimistically turn back on interrupts, then check if there was
1393 * more to do.
1394 */
1395
1396 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1397 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_ENABLE;
1398 vq->packed.vring.driver->flags =
1399 cpu_to_le16(vq->packed.event_flags_shadow);
1400 }
1401
1402 END_USE(vq);
1403 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1404 VRING_PACKED_EVENT_F_WRAP_CTR);
1405}
1406
1407static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1408{
1409 struct vring_virtqueue *vq = to_vvq(_vq);
1410 bool wrap_counter;
1411 u16 used_idx;
1412
1413 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1414 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1415
1416 return is_used_desc_packed(vq, used_idx, wrap_counter);
1417}
1418
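/*
 * The opaque value returned by virtqueue_enable_cb_prepare_packed() and
 * consumed by virtqueue_poll_packed() above packs the last used index in the
 * low bits and the used wrap counter in bit VRING_PACKED_EVENT_F_WRAP_CTR;
 * poll simply decodes the pair and re-checks that descriptor with
 * is_used_desc_packed().
 */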
1419static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1420{
1421 struct vring_virtqueue *vq = to_vvq(_vq);
1422 u16 used_idx, wrap_counter;
1423
1424 START_USE(vq);
1425
1426 /*
1427 * We optimistically turn back on interrupts, then check if there was
1428 * more to do.
1429 */
1430
1431 used_idx = vq->last_used_idx;
1432 wrap_counter = vq->packed.used_wrap_counter;
1433
1434 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1435 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_ENABLE;
1436 vq->packed.vring.driver->flags =
1437 cpu_to_le16(vq->packed.event_flags_shadow);
1438 }
1439
1440 /*
1441 * We need to update event suppression structure first
1442 * before re-checking for more used buffers.
1443 */
1444 virtio_mb(vq->weak_barriers);
1445
1446 if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
1447 END_USE(vq);
1448 return false;
1449 }
1450
1451 END_USE(vq);
1452 return true;
1453}
1454
1455static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1456{
1457 struct vring_virtqueue *vq = to_vvq(_vq);
1458 unsigned int i;
1459 void *buf;
1460
1461 START_USE(vq);
1462
1463 for (i = 0; i < vq->packed.vring.num; i++) {
1464 if (!vq->packed.desc_state[i].data)
1465 continue;
1466 /* detach_buf clears data, so grab it now. */
1467 buf = vq->packed.desc_state[i].data;
1468 detach_buf_packed(vq, i, NULL);
1469 END_USE(vq);
1470 return buf;
1471 }
1472 /* That should have freed everything. */
1473 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1474
1475 END_USE(vq);
1476 return NULL;
1477}
1478
1479static struct virtqueue *vring_create_virtqueue_packed(
1480 unsigned int index,
1481 unsigned int num,
1482 unsigned int vring_align,
1483 struct virtio_device *vdev,
1484 bool weak_barriers,
1485 bool may_reduce_num,
1486 bool context,
1487 bool (*notify)(struct virtqueue *),
1488 void (*callback)(struct virtqueue *),
1489 const char *name)
1490{
1491 struct vring_virtqueue *vq;
1492 struct vring_packed_desc *ring;
1493 struct vring_packed_desc_event *driver, *device;
1494 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1495 size_t ring_size_in_bytes, event_size_in_bytes;
1496 unsigned int i;
1497
1498 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1499
1500 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1501 &ring_dma_addr,
1502 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1503 if (!ring)
1504 goto err_ring;
1505
1506 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1507
1508 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1509 &driver_event_dma_addr,
1510 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1511 if (!driver)
1512 goto err_driver;
1513
1514 device = vring_alloc_queue(vdev, event_size_in_bytes,
1515 &device_event_dma_addr,
1516 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1517 if (!device)
1518 goto err_device;
1519
1520 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1521 if (!vq)
1522 goto err_vq;
1523
1524 vq->vq.callback = callback;
1525 vq->vq.vdev = vdev;
1526 vq->vq.name = name;
1527 vq->vq.num_free = num;
1528 vq->vq.index = index;
1529 vq->we_own_ring = true;
1530 vq->notify = notify;
1531 vq->weak_barriers = weak_barriers;
1532 vq->broken = false;
1533 vq->last_used_idx = 0;
1534 vq->num_added = 0;
1535 vq->packed_ring = true;
1536 vq->use_dma_api = vring_use_dma_api(vdev);
1537 list_add_tail(&vq->vq.list, &vdev->vqs);
1538#ifdef DEBUG
1539 vq->in_use = false;
1540 vq->last_add_time_valid = false;
1541#endif
1542
1543 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1544 !context;
1545 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1546
1547 vq->packed.ring_dma_addr = ring_dma_addr;
1548 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1549 vq->packed.device_event_dma_addr = device_event_dma_addr;
1550
1551 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1552 vq->packed.event_size_in_bytes = event_size_in_bytes;
1553
1554 vq->packed.vring.num = num;
1555 vq->packed.vring.desc = ring;
1556 vq->packed.vring.driver = driver;
1557 vq->packed.vring.device = device;
1558
1559 vq->packed.next_avail_idx = 0;
1560 vq->packed.avail_wrap_counter = 1;
1561 vq->packed.used_wrap_counter = 1;
1562 vq->packed.event_flags_shadow = 0;
1563 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1564
1565 vq->packed.desc_state = kmalloc_array(num,
1566 sizeof(struct vring_desc_state_packed),
1567 GFP_KERNEL);
1568 if (!vq->packed.desc_state)
1569 goto err_desc_state;
1570
1571 memset(vq->packed.desc_state, 0,
1572 num * sizeof(struct vring_desc_state_packed));
1573
1574 /* Put everything in free lists. */
1575 vq->free_head = 0;
1576 for (i = 0; i < num-1; i++)
1577 vq->packed.desc_state[i].next = i + 1;
1578
1579 vq->packed.desc_extra = kmalloc_array(num,
1580 sizeof(struct vring_desc_extra_packed),
1581 GFP_KERNEL);
1582 if (!vq->packed.desc_extra)
1583 goto err_desc_extra;
1584
1585 memset(vq->packed.desc_extra, 0,
1586 num * sizeof(struct vring_desc_extra_packed));
1587
1588 /* No callback? Tell other side not to bother us. */
1589 if (!callback) {
1590 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1591 vq->packed.vring.driver->flags =
1592 cpu_to_le16(vq->packed.event_flags_shadow);
1593 }
1594
1595 return &vq->vq;
1596
1597err_desc_extra:
1598 kfree(vq->packed.desc_state);
1599err_desc_state:
1600 kfree(vq);
1601err_vq:
1602 vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr);
1603err_device:
1604 vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr);
1605err_driver:
1606 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1607err_ring:
1608 return NULL;
1609}
1610
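/*
 * vring_create_virtqueue_packed() above makes three separate DMA allocations:
 * the descriptor ring itself plus one vring_packed_desc_event each for the
 * driver and device areas. Their addresses are what the
 * virtqueue_get_desc_addr()/_avail_addr()/_used_addr() helpers near the end
 * of this file report for packed rings.
 */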
1611
1612/*
1613 * Generic functions and exported symbols.
1614 */
1615
1616static inline int virtqueue_add(struct virtqueue *_vq,
1617 struct scatterlist *sgs[],
1618 unsigned int total_sg,
1619 unsigned int out_sgs,
1620 unsigned int in_sgs,
1621 void *data,
1622 void *ctx,
1623 gfp_t gfp)
1624{
856 return virtqueue_add_split(_vq, sgs, total_sg,
857 out_sgs, in_sgs, data, ctx, gfp);
1625 struct vring_virtqueue *vq = to_vvq(_vq);
1626
1627 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1628 out_sgs, in_sgs, data, ctx, gfp) :
1629 virtqueue_add_split(_vq, sgs, total_sg,
1630 out_sgs, in_sgs, data, ctx, gfp);
1631}
1632
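/*
 * virtqueue_add() above shows the pattern used by all of the public entry
 * points that follow: test packed_ring once and branch to the _packed() or
 * _split() implementation, so callers see the same API whichever ring layout
 * the device negotiated.
 */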
1633/**
1634 * virtqueue_add_sgs - expose buffers to other end
1635 * @vq: the struct virtqueue we're talking about.
1636 * @sgs: array of terminated scatterlists.
1637 * @out_num: the number of scatterlists readable by other side
1638 * @in_num: the number of scatterlists which are writable (after readable ones)

--- 102 unchanged lines hidden ---

1741 * if (virtqueue_kick_prepare(vq))
1742 * virtqueue_notify(vq);
1743 *
1744 * This is sometimes useful because the virtqueue_kick_prepare() needs
1745 * to be serialized, but the actual virtqueue_notify() call does not.
1746 */
1747bool virtqueue_kick_prepare(struct virtqueue *_vq)
1748{
976 return virtqueue_kick_prepare_split(_vq);
1749 struct vring_virtqueue *vq = to_vvq(_vq);
1750
1751 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1752 virtqueue_kick_prepare_split(_vq);
1753}
1754EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1755
1756/**
1757 * virtqueue_notify - second half of split virtqueue_kick call.
1758 * @vq: the struct virtqueue
1759 *
1760 * This does not need to be serialized.

--- 50 unchanged lines hidden ---

1811 * operations at the same time (except where noted).
1812 *
1813 * Returns NULL if there are no used buffers, or the "data" token
1814 * handed to virtqueue_add_*().
1815 */
1816void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1817 void **ctx)
1818{
1043 return virtqueue_get_buf_ctx_split(_vq, len, ctx);
1819 struct vring_virtqueue *vq = to_vvq(_vq);
1820
1821 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1822 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1823}
1824EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1825
1826void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1827{
1828 return virtqueue_get_buf_ctx(_vq, len, NULL);
1829}
1830EXPORT_SYMBOL_GPL(virtqueue_get_buf);
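/*
 * For reference, a minimal driver-side cycle through the API exported above
 * might look roughly like the following (buf, len and the virtqueue pointer
 * are placeholders, and error handling is omitted):
 *
 *	struct scatterlist sg;
 *	unsigned int used_len;
 *	void *token;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 *	...
 *	token = virtqueue_get_buf(vq, &used_len);
 *
 * Whether the queue is split or packed makes no difference to this sequence.
 */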
1831/**
1832 * virtqueue_disable_cb - disable callbacks
1833 * @vq: the struct virtqueue we're talking about.
1834 *
1835 * Note that this is not necessarily synchronous, hence unreliable and only
1836 * useful as an optimization.
1837 *
1838 * Unlike other operations, this need not be serialized.
1839 */
1840void virtqueue_disable_cb(struct virtqueue *_vq)
1841{
1064 virtqueue_disable_cb_split(_vq);
1842 struct vring_virtqueue *vq = to_vvq(_vq);
1843
1844 if (vq->packed_ring)
1845 virtqueue_disable_cb_packed(_vq);
1846 else
1847 virtqueue_disable_cb_split(_vq);
1848}
1849EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1850
1851/**
1852 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1853 * @vq: the struct virtqueue we're talking about.
1854 *
1855 * This re-enables callbacks; it returns current queue state
1856 * in an opaque unsigned value. This value should be later tested by
1857 * virtqueue_poll, to detect a possible race between the driver checking for
1858 * more work, and enabling callbacks.
1859 *
1860 * Caller must ensure we don't call this with other virtqueue
1861 * operations at the same time (except where noted).
1862 */
1863unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1864{
1082 return virtqueue_enable_cb_prepare_split(_vq);
1865 struct vring_virtqueue *vq = to_vvq(_vq);
1866
1867 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
1868 virtqueue_enable_cb_prepare_split(_vq);
1869}
1870EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1871
1872/**
1873 * virtqueue_poll - query pending used buffers
1874 * @vq: the struct virtqueue we're talking about.
1875 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1876 *
1877 * Returns "true" if there are pending used buffers in the queue.
1878 *
1879 * This does not need to be serialized.
1880 */
1881bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1882{
1883 struct vring_virtqueue *vq = to_vvq(_vq);
1884
1885 virtio_mb(vq->weak_barriers);
1100 return virtqueue_poll_split(_vq, last_used_idx);
1886 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
1887 virtqueue_poll_split(_vq, last_used_idx);
1888}
1889EXPORT_SYMBOL_GPL(virtqueue_poll);
1890
1891/**
1892 * virtqueue_enable_cb - restart callbacks after disable_cb.
1893 * @vq: the struct virtqueue we're talking about.
1894 *
1895 * This re-enables callbacks; it returns "false" if there are pending

--- 21 unchanged lines hidden ---

1917 * to detect a possible race between the driver checking for more work,
1918 * and enabling callbacks.
1919 *
1920 * Caller must ensure we don't call this with other virtqueue
1921 * operations at the same time (except where noted).
1922 */
1923bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
1924{
1138 return virtqueue_enable_cb_delayed_split(_vq);
1925 struct vring_virtqueue *vq = to_vvq(_vq);
1926
1927 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
1928 virtqueue_enable_cb_delayed_split(_vq);
1929}
1930EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
1931
1932/**
1933 * virtqueue_detach_unused_buf - detach first unused buffer
1934 * @vq: the struct virtqueue we're talking about.
1935 *
1936 * Returns NULL or the "data" token handed to virtqueue_add_*().
1937 * This is not valid on an active queue; it is useful only for device
1938 * shutdown.
1939 */
1940void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
1941{
1152 return virtqueue_detach_unused_buf_split(_vq);
1942 struct vring_virtqueue *vq = to_vvq(_vq);
1943
1944 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
1945 virtqueue_detach_unused_buf_split(_vq);
1946}
1947EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
1948
1949static inline bool more_used(const struct vring_virtqueue *vq)
1950{
1158 return more_used_split(vq);
1951 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
1952}
1953
1954irqreturn_t vring_interrupt(int irq, void *_vq)
1955{
1956 struct vring_virtqueue *vq = to_vvq(_vq);
1957
1958 if (!more_used(vq)) {
1959 pr_debug("virtqueue interrupt with no work for %p\n", vq);

--- 6 unchanged lines hidden ---

1966 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
1967 if (vq->vq.callback)
1968 vq->vq.callback(&vq->vq);
1969
1970 return IRQ_HANDLED;
1971}
1972EXPORT_SYMBOL_GPL(vring_interrupt);
1973
1974/* Only available for split ring */
1975struct virtqueue *__vring_new_virtqueue(unsigned int index,
1976 struct vring vring,
1977 struct virtio_device *vdev,
1978 bool weak_barriers,
1979 bool context,
1980 bool (*notify)(struct virtqueue *),
1981 void (*callback)(struct virtqueue *),
1982 const char *name)
1983{
1984 unsigned int i;
1985 struct vring_virtqueue *vq;
1986
1987 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
1988 return NULL;
1989
1990 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1991 if (!vq)
1992 return NULL;
1993
1994 vq->packed_ring = false;
1995 vq->vq.callback = callback;
1996 vq->vq.vdev = vdev;
1997 vq->vq.name = name;
1998 vq->vq.num_free = vring.num;
1999 vq->vq.index = index;
2000 vq->we_own_ring = false;
2001 vq->notify = notify;
2002 vq->weak_barriers = weak_barriers;

--- 51 unchanged lines hidden ---

2054 struct virtio_device *vdev,
2055 bool weak_barriers,
2056 bool may_reduce_num,
2057 bool context,
2058 bool (*notify)(struct virtqueue *),
2059 void (*callback)(struct virtqueue *),
2060 const char *name)
2061{
2062
2063 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2064 return vring_create_virtqueue_packed(index, num, vring_align,
2065 vdev, weak_barriers, may_reduce_num,
2066 context, notify, callback, name);
2067
2068 return vring_create_virtqueue_split(index, num, vring_align,
2069 vdev, weak_barriers, may_reduce_num,
2070 context, notify, callback, name);
2071}
2072EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2073
2074/* Only available for split ring */
2075struct virtqueue *vring_new_virtqueue(unsigned int index,
2076 unsigned int num,
2077 unsigned int vring_align,
2078 struct virtio_device *vdev,
2079 bool weak_barriers,
2080 bool context,
2081 void *pages,
2082 bool (*notify)(struct virtqueue *vq),
2083 void (*callback)(struct virtqueue *vq),
2084 const char *name)
2085{
2086 struct vring vring;
2087
2088 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2089 return NULL;
2090
2091 vring_init(&vring, num, pages, vring_align);
2092 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2093 notify, callback, name);
2094}
2095EXPORT_SYMBOL_GPL(vring_new_virtqueue);
2096
2097void vring_del_virtqueue(struct virtqueue *_vq)
2098{
2099 struct vring_virtqueue *vq = to_vvq(_vq);
2100
2101 if (vq->we_own_ring) {
1293 vring_free_queue(vq->vq.vdev,
1294 vq->split.queue_size_in_bytes,
1295 vq->split.vring.desc,
1296 vq->split.queue_dma_addr);
1297 kfree(vq->split.desc_state);
2102 if (vq->packed_ring) {
2103 vring_free_queue(vq->vq.vdev,
2104 vq->packed.ring_size_in_bytes,
2105 vq->packed.vring.desc,
2106 vq->packed.ring_dma_addr);
2107
2108 vring_free_queue(vq->vq.vdev,
2109 vq->packed.event_size_in_bytes,
2110 vq->packed.vring.driver,
2111 vq->packed.driver_event_dma_addr);
2112
2113 vring_free_queue(vq->vq.vdev,
2114 vq->packed.event_size_in_bytes,
2115 vq->packed.vring.device,
2116 vq->packed.device_event_dma_addr);
2117
2118 kfree(vq->packed.desc_state);
2119 kfree(vq->packed.desc_extra);
2120 } else {
2121 vring_free_queue(vq->vq.vdev,
2122 vq->split.queue_size_in_bytes,
2123 vq->split.vring.desc,
2124 vq->split.queue_dma_addr);
2125
2126 kfree(vq->split.desc_state);
2127 }
2128 }
2129 list_del(&_vq->list);
2130 kfree(vq);
2131}
2132EXPORT_SYMBOL_GPL(vring_del_virtqueue);
2133
2134/* Manipulates transport-specific feature bits. */
2135void vring_transport_features(struct virtio_device *vdev)

--- 25 unchanged lines hidden ---

2161 * Returns the size of the vring. This is mainly used for boasting to
2162 * userspace. Unlike other operations, this need not be serialized.
2163 */
2164unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2165{
2166
2167 struct vring_virtqueue *vq = to_vvq(_vq);
2168
1339 return vq->split.vring.num;
2169 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2170}
2171EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2172
2173bool virtqueue_is_broken(struct virtqueue *_vq)
2174{
2175 struct vring_virtqueue *vq = to_vvq(_vq);
2176
2177 return vq->broken;

--- 16 unchanged lines hidden ---

2194EXPORT_SYMBOL_GPL(virtio_break_device);
2195
2196dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
2197{
2198 struct vring_virtqueue *vq = to_vvq(_vq);
2199
2200 BUG_ON(!vq->we_own_ring);
2201
2202 if (vq->packed_ring)
2203 return vq->packed.ring_dma_addr;
2204
2205 return vq->split.queue_dma_addr;
2206}
2207EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2208
2209dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
2210{
2211 struct vring_virtqueue *vq = to_vvq(_vq);
2212
2213 BUG_ON(!vq->we_own_ring);
2214
2215 if (vq->packed_ring)
2216 return vq->packed.driver_event_dma_addr;
2217
2218 return vq->split.queue_dma_addr +
2219 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2220}
2221EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2222
2223dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2224{
2225 struct vring_virtqueue *vq = to_vvq(_vq);
2226
2227 BUG_ON(!vq->we_own_ring);
2228
2229 if (vq->packed_ring)
2230 return vq->packed.device_event_dma_addr;
2231
2232 return vq->split.queue_dma_addr +
2233 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
2234}
2235EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2236
2237/* Only available for split ring */
2238const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2239{
2240 return &to_vvq(vq)->split.vring;
2241}
2242EXPORT_SYMBOL_GPL(virtqueue_get_vring);
2243
2244MODULE_LICENSE("GPL");