xref: /openbmc/qemu/hw/nvme/dif.c (revision 49f95221)
/*
 * QEMU NVM Express End-to-End Data Protection support
 *
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Klaus Jensen           <k.jensen@samsung.com>
 *   Gollu Appalanaidu      <anaidu.gollu@samsung.com>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"

#include "nvme.h"
#include "dif.h"
#include "trace.h"

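/*
 * For Type 1 protection, the 32-bit (16-bit guard) or 48-bit (64-bit guard)
 * reference tag given in the command must match the least significant bits
 * of the starting LBA; Types 2 and 3 place no constraint on the initial
 * reference tag.
 */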
uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba,
                           uint64_t reftag)
{
    uint64_t mask = ns->pif ? 0xffffffffffff : 0xffffffff;

    if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_1) &&
        (prinfo & NVME_PRINFO_PRCHK_REF) && (slba & mask) != reftag) {
        return NVME_INVALID_PROT_INFO | NVME_DNR;
    }

    return NVME_SUCCESS;
}

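/*
 * Byte-at-a-time, table-driven CRC16 using the T10 DIF polynomial (0x8bb7);
 * crc16_t10dif_table holds the precomputed values.
 */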
/* from Linux kernel (crypto/crct10dif_common.c) */
static uint16_t crc16_t10dif(uint16_t crc, const unsigned char *buffer,
                             size_t len)
{
    unsigned int i;

    for (i = 0; i < len; i++) {
        crc = (crc << 8) ^ crc16_t10dif_table[((crc >> 8) ^ buffer[i]) & 0xff];
    }

    return crc;
}

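/*
 * Reflected, table-driven CRC64 as used for the NVMe 64-bit guard. Callers
 * seed with ~0 and the final inversion (xorout) is applied on return.
 */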
/* from Linux kernel (lib/crc64.c) */
static uint64_t crc64_nvme(uint64_t crc, const unsigned char *buffer,
                           size_t len)
{
    size_t i;

    for (i = 0; i < len; i++) {
        crc = (crc >> 8) ^ crc64_nvme_table[(crc & 0xff) ^ buffer[i]];
    }

    return crc ^ (uint64_t)~0;
}

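/*
 * Generate protection information (PRACT, 16-bit guard) for every logical
 * block in the data buffer. If the tuple does not occupy the first bytes of
 * the metadata (DPS FIRST_EIGHT clear), the metadata bytes preceding it are
 * included in the guard. The reference tag is incremented per block for
 * Types 1 and 2 and left untouched for Type 3.
 */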
static void nvme_dif_pract_generate_dif_crc16(NvmeNamespace *ns, uint8_t *buf,
                                              size_t len, uint8_t *mbuf,
                                              size_t mlen, uint16_t apptag,
                                              uint64_t *reftag)
{
    uint8_t *end = buf + len;
    int16_t pil = 0;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    trace_pci_nvme_dif_pract_generate_dif_crc16(len, ns->lbasz,
                                                ns->lbasz + pil, apptag,
                                                *reftag);

    for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz);

        if (pil) {
            crc = crc16_t10dif(crc, mbuf, pil);
        }

        dif->g16.guard = cpu_to_be16(crc);
        dif->g16.apptag = cpu_to_be16(apptag);
        dif->g16.reftag = cpu_to_be32(*reftag);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }
}

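/*
 * 64-bit guard variant of the above; the 48-bit storage reference tag is
 * stored big-endian in the six sr[] bytes of the tuple.
 */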
static void nvme_dif_pract_generate_dif_crc64(NvmeNamespace *ns, uint8_t *buf,
                                              size_t len, uint8_t *mbuf,
                                              size_t mlen, uint16_t apptag,
                                              uint64_t *reftag)
{
    uint8_t *end = buf + len;
    int16_t pil = 0;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    trace_pci_nvme_dif_pract_generate_dif_crc64(len, ns->lbasz,
                                                ns->lbasz + pil, apptag,
                                                *reftag);

    for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz);

        if (pil) {
            crc = crc64_nvme(crc, mbuf, pil);
        }

        dif->g64.guard = cpu_to_be64(crc);
        dif->g64.apptag = cpu_to_be16(apptag);

        dif->g64.sr[0] = *reftag >> 40;
        dif->g64.sr[1] = *reftag >> 32;
        dif->g64.sr[2] = *reftag >> 24;
        dif->g64.sr[3] = *reftag >> 16;
        dif->g64.sr[4] = *reftag >> 8;
        dif->g64.sr[5] = *reftag;

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }
}

void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len,
                                 uint8_t *mbuf, size_t mlen, uint16_t apptag,
                                 uint64_t *reftag)
{
    switch (ns->pif) {
    case NVME_PI_GUARD_16:
        return nvme_dif_pract_generate_dif_crc16(ns, buf, len, mbuf, mlen,
                                                 apptag, reftag);
    case NVME_PI_GUARD_64:
        return nvme_dif_pract_generate_dif_crc64(ns, buf, len, mbuf, mlen,
                                                 apptag, reftag);
    }

    abort();
}

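/*
 * Per-block protection information check for the 16-bit guard format. A
 * tuple carrying the escape pattern (application tag of all ones and, for
 * Type 3, a reference tag of all ones as well) disables checking for that
 * block.
 */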
static uint16_t nvme_dif_prchk_crc16(NvmeNamespace *ns, NvmeDifTuple *dif,
                                     uint8_t *buf, uint8_t *mbuf, size_t pil,
                                     uint8_t prinfo, uint16_t apptag,
                                     uint16_t appmask, uint64_t reftag)
{
    switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
    case NVME_ID_NS_DPS_TYPE_3:
        if (be32_to_cpu(dif->g16.reftag) != 0xffffffff) {
            break;
        }

        /* fallthrough */
    case NVME_ID_NS_DPS_TYPE_1:
    case NVME_ID_NS_DPS_TYPE_2:
        if (be16_to_cpu(dif->g16.apptag) != 0xffff) {
            break;
        }

        trace_pci_nvme_dif_prchk_disabled_crc16(be16_to_cpu(dif->g16.apptag),
                                                be32_to_cpu(dif->g16.reftag));

        return NVME_SUCCESS;
    }

    if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
        uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz);

        if (pil) {
            crc = crc16_t10dif(crc, mbuf, pil);
        }

        trace_pci_nvme_dif_prchk_guard_crc16(be16_to_cpu(dif->g16.guard), crc);

        if (be16_to_cpu(dif->g16.guard) != crc) {
            return NVME_E2E_GUARD_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_APP) {
        trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g16.apptag), apptag,
                                        appmask);

        if ((be16_to_cpu(dif->g16.apptag) & appmask) != (apptag & appmask)) {
            return NVME_E2E_APP_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_REF) {
        trace_pci_nvme_dif_prchk_reftag_crc16(be32_to_cpu(dif->g16.reftag),
                                              reftag);

        if (be32_to_cpu(dif->g16.reftag) != reftag) {
            return NVME_E2E_REF_ERROR;
        }
    }

    return NVME_SUCCESS;
}

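/*
 * 64-bit guard counterpart; the 48-bit storage reference tag is reassembled
 * from the big-endian sr[] bytes before the escape pattern and the reference
 * check are evaluated.
 */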
static uint16_t nvme_dif_prchk_crc64(NvmeNamespace *ns, NvmeDifTuple *dif,
                                     uint8_t *buf, uint8_t *mbuf, size_t pil,
                                     uint8_t prinfo, uint16_t apptag,
                                     uint16_t appmask, uint64_t reftag)
{
    uint64_t r = 0;

    r |= (uint64_t)dif->g64.sr[0] << 40;
    r |= (uint64_t)dif->g64.sr[1] << 32;
    r |= (uint64_t)dif->g64.sr[2] << 24;
    r |= (uint64_t)dif->g64.sr[3] << 16;
    r |= (uint64_t)dif->g64.sr[4] << 8;
    r |= (uint64_t)dif->g64.sr[5];

    switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
    case NVME_ID_NS_DPS_TYPE_3:
        if (r != 0xffffffffffff) {
            break;
        }

        /* fallthrough */
    case NVME_ID_NS_DPS_TYPE_1:
    case NVME_ID_NS_DPS_TYPE_2:
        if (be16_to_cpu(dif->g64.apptag) != 0xffff) {
            break;
        }

        trace_pci_nvme_dif_prchk_disabled_crc64(be16_to_cpu(dif->g64.apptag),
                                                r);

        return NVME_SUCCESS;
    }

    if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
        uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz);

        if (pil) {
            crc = crc64_nvme(crc, mbuf, pil);
        }

        trace_pci_nvme_dif_prchk_guard_crc64(be64_to_cpu(dif->g64.guard), crc);

        if (be64_to_cpu(dif->g64.guard) != crc) {
            return NVME_E2E_GUARD_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_APP) {
        trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g64.apptag), apptag,
                                        appmask);

        if ((be16_to_cpu(dif->g64.apptag) & appmask) != (apptag & appmask)) {
            return NVME_E2E_APP_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_REF) {
        trace_pci_nvme_dif_prchk_reftag_crc64(r, reftag);

        if (r != reftag) {
            return NVME_E2E_REF_ERROR;
        }
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif,
                               uint8_t *buf, uint8_t *mbuf, size_t pil,
                               uint8_t prinfo, uint16_t apptag,
                               uint16_t appmask, uint64_t reftag)
{
    switch (ns->pif) {
    case NVME_PI_GUARD_16:
        return nvme_dif_prchk_crc16(ns, dif, buf, mbuf, pil, prinfo, apptag,
                                    appmask, reftag);
    case NVME_PI_GUARD_64:
        return nvme_dif_prchk_crc64(ns, dif, buf, mbuf, pil, prinfo, apptag,
                                    appmask, reftag);
    }

    abort();
}

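/*
 * Verify the protection information of each logical block in the bounce
 * buffers, honoring the PRCHK bits in prinfo. The expected reference tag is
 * advanced per block for Types 1 and 2.
 */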
uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len,
                        uint8_t *mbuf, size_t mlen, uint8_t prinfo,
                        uint64_t slba, uint16_t apptag,
                        uint16_t appmask, uint64_t *reftag)
{
    uint8_t *bufp, *end = buf + len;
    int16_t pil = 0;
    uint16_t status;

    status = nvme_check_prinfo(ns, prinfo, slba, *reftag);
    if (status) {
        return status;
    }

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    trace_pci_nvme_dif_check(prinfo, ns->lbasz + pil);

    for (bufp = buf; bufp < end; bufp += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        status = nvme_dif_prchk(ns, dif, bufp, mbuf, pil, prinfo, apptag,
                                appmask, *reftag);
        if (status) {
            /*
             * The first block of a 'raw' image is always allocated, so we
             * cannot reliably know if the block is all zeroes or not. For
             * CRC16 this works fine because the T10 CRC16 is 0x0 for all
             * zeroes, but the Rocksoft CRC64 is not. Thus, if a guard error is
             * detected for the first block, check if it is zeroed and manually
             * set the protection information to all ones to disable protection
             * information checking.
             */
            if (status == NVME_E2E_GUARD_ERROR && slba == 0x0 && bufp == buf) {
                g_autofree uint8_t *zeroes = g_malloc0(ns->lbasz);

                if (memcmp(bufp, zeroes, ns->lbasz) == 0) {
                    memset(mbuf + pil, 0xff, nvme_pi_tuple_size(ns));
                }
            } else {
                return status;
            }
        }

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }

    return NVME_SUCCESS;
}

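/*
 * Logical blocks backed by unallocated or zeroed extents in the image carry
 * no stored protection information. Overwrite the tuples of such blocks in
 * the metadata bounce buffer with the all-ones escape pattern so that the
 * subsequent checks treat them as disabled.
 */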
uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen,
                               uint64_t slba)
{
    BlockBackend *blk = ns->blkconf.blk;
    BlockDriverState *bs = blk_bs(blk);

    int64_t moffset = 0, offset = nvme_l2b(ns, slba);
    uint8_t *mbufp, *end;
    bool zeroed;
    int16_t pil = 0;
    int64_t bytes = (mlen / ns->lbaf.ms) << ns->lbaf.ds;
    int64_t pnum = 0;

    Error *err = NULL;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    do {
        int ret;

        bytes -= pnum;

        ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
        if (ret < 0) {
            error_setg_errno(&err, -ret, "unable to get block status");
            error_report_err(err);

            return NVME_INTERNAL_DEV_ERROR;
        }

        zeroed = !!(ret & BDRV_BLOCK_ZERO);

        trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed);

        if (zeroed) {
            mbufp = mbuf + moffset;
            mlen = (pnum >> ns->lbaf.ds) * ns->lbaf.ms;
            end = mbufp + mlen;

            for (; mbufp < end; mbufp += ns->lbaf.ms) {
                memset(mbufp + pil, 0xff, nvme_pi_tuple_size(ns));
            }
        }

        moffset += (pnum >> ns->lbaf.ds) * ns->lbaf.ms;
        offset += pnum;
    } while (pnum != bytes);

    return NVME_SUCCESS;
}

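/*
 * Final completion callback shared by all DIF-aware I/O paths: tear down the
 * bounce buffers and hand the result to the generic read/write completion
 * handler.
 */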
static void nvme_dif_rw_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_cb(nvme_cid(req), blk_name(blk));

    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    nvme_rw_complete_cb(req, ret);
}

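/*
 * Read path verification: with both data and metadata in the bounce buffers,
 * mangle the metadata of unwritten blocks, run the protection information
 * checks and copy the results to the host. If PRACT is set and the metadata
 * consists solely of the tuple, the protection information is stripped
 * rather than transferred.
 */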
static void nvme_dif_rw_check_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeCtrl *n = nvme_ctrl(req);
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint64_t reftag = le32_to_cpu(rw->reftag);
    uint64_t cdw3 = le32_to_cpu(rw->cdw3);
    uint16_t status;

    reftag |= cdw3 << 32;

    trace_pci_nvme_dif_rw_check_cb(nvme_cid(req), prinfo, apptag, appmask,
                                   reftag);

    if (ret) {
        goto out;
    }

    status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, ctx->mdata.iov.size,
                                   slba);
    if (status) {
        req->status = status;
        goto out;
    }

    status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                            ctx->mdata.bounce, ctx->mdata.iov.size, prinfo,
                            slba, apptag, appmask, &reftag);
    if (status) {
        req->status = status;
        goto out;
    }

    status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size,
                              NVME_TX_DIRECTION_FROM_DEVICE, req);
    if (status) {
        req->status = status;
        goto out;
    }

    if (prinfo & NVME_PRINFO_PRACT && ns->lbaf.ms == nvme_pi_tuple_size(ns)) {
        goto out;
    }

    status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size,
                               NVME_TX_DIRECTION_FROM_DEVICE, req);
    if (status) {
        req->status = status;
    }

out:
    nvme_dif_rw_cb(ctx, ret);
}

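/*
 * Stage two of the read path: the data read has completed, so issue the read
 * for the metadata area and continue in nvme_dif_rw_check_cb.
 */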
static void nvme_dif_rw_mdata_in_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    size_t mlen = nvme_m2b(ns, nlb);
    uint64_t offset = nvme_moff(ns, slba);
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_mdata_in_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    ctx->mdata.bounce = g_malloc(mlen);

    qemu_iovec_reset(&ctx->mdata.iov);
    qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

    req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
                                nvme_dif_rw_check_cb, ctx);
    return;

out:
    nvme_dif_rw_cb(ctx, ret);
}

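/*
 * Stage two of the write path: the data (or zeroes) write has completed, so
 * write out the metadata bounce buffer.
 */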
static void nvme_dif_rw_mdata_out_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t offset = nvme_moff(ns, slba);
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_mdata_out_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    req->aiocb = blk_aio_pwritev(blk, offset, &ctx->mdata.iov, 0,
                                 nvme_dif_rw_cb, ctx);
    return;

out:
    nvme_dif_rw_cb(ctx, ret);
}

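/*
 * Entry point for reads and writes to a namespace formatted with protection
 * information. Write Zeroes is handled inline: with PRACT set, a metadata
 * buffer of generated tuples is written once the zeroes have been written.
 * Ordinary reads and writes go through bounce buffers so that tuples can be
 * generated or verified before anything reaches the host or the backing
 * image.
 */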
uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    bool wrz = rw->opcode == NVME_CMD_WRITE_ZEROES;
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    size_t len = nvme_l2b(ns, nlb);
    size_t mlen = nvme_m2b(ns, nlb);
    size_t mapped_len = len;
    int64_t offset = nvme_l2b(ns, slba);
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint64_t reftag = le32_to_cpu(rw->reftag);
    uint64_t cdw3 = le32_to_cpu(rw->cdw3);
    bool pract = !!(prinfo & NVME_PRINFO_PRACT);
    NvmeBounceContext *ctx;
    uint16_t status;

    reftag |= cdw3 << 32;

    trace_pci_nvme_dif_rw(pract, prinfo);

    ctx = g_new0(NvmeBounceContext, 1);
    ctx->req = req;

    if (wrz) {
        BdrvRequestFlags flags = BDRV_REQ_MAY_UNMAP;

        if (prinfo & NVME_PRINFO_PRCHK_MASK) {
            status = NVME_INVALID_PROT_INFO | NVME_DNR;
            goto err;
        }

        if (pract) {
            uint8_t *mbuf, *end;
            int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);

            status = nvme_check_prinfo(ns, prinfo, slba, reftag);
            if (status) {
                goto err;
            }

            flags = 0;

            ctx->mdata.bounce = g_malloc0(mlen);

            qemu_iovec_init(&ctx->mdata.iov, 1);
            qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

            mbuf = ctx->mdata.bounce;
            end = mbuf + mlen;

            if (ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT) {
                pil = 0;
            }

            for (; mbuf < end; mbuf += ns->lbaf.ms) {
                NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);

                switch (ns->pif) {
                case NVME_PI_GUARD_16:
                    dif->g16.apptag = cpu_to_be16(apptag);
                    dif->g16.reftag = cpu_to_be32(reftag);

                    break;

                case NVME_PI_GUARD_64:
                    /*
                     * Fixed guard for zeroed data; 0x6482d367eb22b64e is the
                     * CRC64 check value for 4 KiB of zero bytes given in the
                     * NVMe base specification.
                     */
                    dif->g64.guard = cpu_to_be64(0x6482d367eb22b64e);
                    dif->g64.apptag = cpu_to_be16(apptag);

                    dif->g64.sr[0] = reftag >> 40;
                    dif->g64.sr[1] = reftag >> 32;
                    dif->g64.sr[2] = reftag >> 24;
                    dif->g64.sr[3] = reftag >> 16;
                    dif->g64.sr[4] = reftag >> 8;
                    dif->g64.sr[5] = reftag;

                    break;

                default:
                    abort();
                }

                switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
                case NVME_ID_NS_DPS_TYPE_1:
                case NVME_ID_NS_DPS_TYPE_2:
                    reftag++;
                }
            }
        }

        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len, flags,
                                           nvme_dif_rw_mdata_out_cb, ctx);
        return NVME_NO_COMPLETE;
    }

    if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
        mapped_len += mlen;
    }

    status = nvme_map_dptr(n, &req->sg, mapped_len, &req->cmd);
    if (status) {
        goto err;
    }

    ctx->data.bounce = g_malloc(len);

    qemu_iovec_init(&ctx->data.iov, 1);
    qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len);

    if (req->cmd.opcode == NVME_CMD_READ) {
        block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
                         BLOCK_ACCT_READ);

        req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0,
                                    nvme_dif_rw_mdata_in_cb, ctx);
        return NVME_NO_COMPLETE;
    }

    status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size,
                              NVME_TX_DIRECTION_TO_DEVICE, req);
    if (status) {
        goto err;
    }

    ctx->mdata.bounce = g_malloc(mlen);

    qemu_iovec_init(&ctx->mdata.iov, 1);
    qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

    if (!(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
        status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size,
                                   NVME_TX_DIRECTION_TO_DEVICE, req);
        if (status) {
            goto err;
        }
    }

    status = nvme_check_prinfo(ns, prinfo, slba, reftag);
    if (status) {
        goto err;
    }

    if (pract) {
        /* splice generated protection information into the buffer */
        nvme_dif_pract_generate_dif(ns, ctx->data.bounce, ctx->data.iov.size,
                                    ctx->mdata.bounce, ctx->mdata.iov.size,
                                    apptag, &reftag);
    } else {
        status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                                ctx->mdata.bounce, ctx->mdata.iov.size, prinfo,
                                slba, apptag, appmask, &reftag);
        if (status) {
            goto err;
        }
    }

    block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
                     BLOCK_ACCT_WRITE);

    req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &ctx->data.iov, 0,
                                 nvme_dif_rw_mdata_out_cb, ctx);

    return NVME_NO_COMPLETE;

err:
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    return status;
}