/*
 * 9P network client for VirtIO 9P test cases (based on QTest)
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "virtio-9p-client.h"

#define QVIRTIO_9P_TIMEOUT_US (10 * 1000 * 1000)
static QGuestAllocator *alloc;

void v9fs_set_allocator(QGuestAllocator *t_alloc)
{
    alloc = t_alloc;
}

/*
 * Used to auto-generate new fids. Start with an arbitrarily high value to
 * avoid collisions with hard-coded fids in basic test code.
 */
static uint32_t fid_generator = 1000;

static uint32_t genfid(void)
{
    return fid_generator++;
}

/**
 * Splits the @a in string by @a delim into individual (non-empty) strings
 * and outputs them to @a out. The output array @a out is NULL-terminated.
 *
 * Output array @a out must be freed by calling split_free().
 *
 * @returns number of individual elements in output array @a out (without the
 *          final NULL terminator)
 */
static int split(const char *in, const char *delim, char ***out)
{
    int n = 0, i = 0;
    char *tmp, *p;

    tmp = g_strdup(in);
    for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) {
        if (strlen(p) > 0) {
            ++n;
        }
    }
    g_free(tmp);

    *out = g_new0(char *, n + 1); /* last element NULL delimiter */

    tmp = g_strdup(in);
    for (p = strtok(tmp, delim); p != NULL; p = strtok(NULL, delim)) {
        if (strlen(p) > 0) {
            (*out)[i++] = g_strdup(p);
        }
    }
    g_free(tmp);

    return n;
}

static void split_free(char ***out)
{
    int i;
    if (!*out) {
        return;
    }
    for (i = 0; (*out)[i]; ++i) {
        g_free((*out)[i]);
    }
    g_free(*out);
    *out = NULL;
}
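
/*
 * Illustrative sketch (not part of any test): how split() and split_free()
 * are used together. The path string "usr/local/bin" is hypothetical.
 *
 *     char **wnames = NULL;
 *     int nwname = split("usr/local/bin", "/", &wnames);
 *     // nwname == 3, wnames == { "usr", "local", "bin", NULL }
 *     split_free(&wnames);
 */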

void v9fs_memwrite(P9Req *req, const void *addr, size_t len)
{
    qtest_memwrite(req->qts, req->t_msg + req->t_off, addr, len);
    req->t_off += len;
}

void v9fs_memskip(P9Req *req, size_t len)
{
    req->r_off += len;
}

void v9fs_memread(P9Req *req, void *addr, size_t len)
{
    qtest_memread(req->qts, req->r_msg + req->r_off, addr, len);
    req->r_off += len;
}

void v9fs_uint8_read(P9Req *req, uint8_t *val)
{
    v9fs_memread(req, val, 1);
}

void v9fs_uint16_write(P9Req *req, uint16_t val)
{
    uint16_t le_val = cpu_to_le16(val);

    v9fs_memwrite(req, &le_val, 2);
}

void v9fs_uint16_read(P9Req *req, uint16_t *val)
{
    v9fs_memread(req, val, 2);
    le16_to_cpus(val);
}

void v9fs_uint32_write(P9Req *req, uint32_t val)
{
    uint32_t le_val = cpu_to_le32(val);

    v9fs_memwrite(req, &le_val, 4);
}

void v9fs_uint64_write(P9Req *req, uint64_t val)
{
    uint64_t le_val = cpu_to_le64(val);

    v9fs_memwrite(req, &le_val, 8);
}

void v9fs_uint32_read(P9Req *req, uint32_t *val)
{
    v9fs_memread(req, val, 4);
    le32_to_cpus(val);
}

void v9fs_uint64_read(P9Req *req, uint64_t *val)
{
    v9fs_memread(req, val, 8);
    le64_to_cpus(val);
}

/* len[2] string[len] */
uint16_t v9fs_string_size(const char *string)
{
    size_t len = strlen(string);

    g_assert_cmpint(len, <=, UINT16_MAX - 2);

    return 2 + len;
}

void v9fs_string_write(P9Req *req, const char *string)
{
    int len = strlen(string);

    g_assert_cmpint(len, <=, UINT16_MAX);

    v9fs_uint16_write(req, (uint16_t) len);
    v9fs_memwrite(req, string, len);
}

void v9fs_string_read(P9Req *req, uint16_t *len, char **string)
{
    uint16_t local_len;

    v9fs_uint16_read(req, &local_len);
    if (len) {
        *len = local_len;
    }
    if (string) {
        *string = g_malloc(local_len + 1);
        v9fs_memread(req, *string, local_len);
        (*string)[local_len] = 0;
    } else {
        v9fs_memskip(req, local_len);
    }
}
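
/*
 * Worked example (illustrative only): a 9P string is encoded as len[2]
 * followed by the raw characters, without a trailing NUL. For the
 * hypothetical string "abc" the helpers above produce the little-endian
 * byte sequence 03 00 61 62 63 on the wire:
 *
 *     v9fs_string_write(req, "abc");          // writes 03 00 61 62 63
 *     ...
 *     uint16_t len;
 *     char *name;
 *     v9fs_string_read(req, &len, &name);     // len == 3, name == "abc"
 *     g_free(name);
 */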

typedef struct {
    uint32_t size;
    uint8_t id;
    uint16_t tag;
} QEMU_PACKED P9Hdr;

P9Req *v9fs_req_init(QVirtio9P *v9p, uint32_t size, uint8_t id,
                     uint16_t tag)
{
    P9Req *req = g_new0(P9Req, 1);
    uint32_t total_size = 7; /* 9P header has well-known size of 7 bytes */
    P9Hdr hdr = {
        .id = id,
        .tag = cpu_to_le16(tag)
    };

    g_assert_cmpint(total_size, <=, UINT32_MAX - size);
    total_size += size;
    hdr.size = cpu_to_le32(total_size);

    g_assert_cmpint(total_size, <=, P9_MAX_SIZE);

    req->qts = global_qtest;
    req->v9p = v9p;
    req->t_size = total_size;
    req->t_msg = guest_alloc(alloc, req->t_size);
    v9fs_memwrite(req, &hdr, 7);
    req->tag = tag;
    return req;
}

void v9fs_req_send(P9Req *req)
{
    QVirtio9P *v9p = req->v9p;

    req->r_msg = guest_alloc(alloc, P9_MAX_SIZE);
    req->free_head = qvirtqueue_add(req->qts, v9p->vq, req->t_msg, req->t_size,
                                    false, true);
    qvirtqueue_add(req->qts, v9p->vq, req->r_msg, P9_MAX_SIZE, true, false);
    qvirtqueue_kick(req->qts, v9p->vdev, v9p->vq, req->free_head);
    req->t_off = 0;
}

static const char *rmessage_name(uint8_t id)
{
    return
        id == P9_RLERROR ? "RLERROR" :
        id == P9_RVERSION ? "RVERSION" :
        id == P9_RATTACH ? "RATTACH" :
        id == P9_RWALK ? "RWALK" :
        id == P9_RLOPEN ? "RLOPEN" :
        id == P9_RWRITE ? "RWRITE" :
        id == P9_RMKDIR ? "RMKDIR" :
        id == P9_RLCREATE ? "RLCREATE" :
        id == P9_RSYMLINK ? "RSYMLINK" :
        id == P9_RGETATTR ? "RGETATTR" :
        id == P9_RLINK ? "RLINK" :
        id == P9_RUNLINKAT ? "RUNLINKAT" :
        id == P9_RFLUSH ? "RFLUSH" :
        id == P9_RREADDIR ? "RREADDIR" :
        "<unknown>";
}

void v9fs_req_wait_for_reply(P9Req *req, uint32_t *len)
{
    QVirtio9P *v9p = req->v9p;

    qvirtio_wait_used_elem(req->qts, v9p->vdev, v9p->vq, req->free_head, len,
                           QVIRTIO_9P_TIMEOUT_US);
}

void v9fs_req_recv(P9Req *req, uint8_t id)
{
    P9Hdr hdr;

    v9fs_memread(req, &hdr, 7);
    hdr.size = ldl_le_p(&hdr.size);
    hdr.tag = lduw_le_p(&hdr.tag);

    g_assert_cmpint(hdr.size, >=, 7);
    g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE);
    g_assert_cmpint(hdr.tag, ==, req->tag);

    if (hdr.id != id) {
        g_printerr("Received response %d (%s) instead of %d (%s)\n",
                   hdr.id, rmessage_name(hdr.id), id, rmessage_name(id));

        if (hdr.id == P9_RLERROR) {
            uint32_t err;
            v9fs_uint32_read(req, &err);
            g_printerr("Rlerror has errno %d (%s)\n", err, strerror(err));
        }
    }
    g_assert_cmpint(hdr.id, ==, id);
}

void v9fs_req_free(P9Req *req)
{
    guest_free(alloc, req->t_msg);
    guest_free(alloc, req->r_msg);
    g_free(req);
}
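
/*
 * Illustrative sketch (assumptions: v9p, fid and tag come from the caller) of
 * the low-level request life cycle implemented by the helpers above; the
 * v9fs_t*()/v9fs_r*() wrappers below all follow this same pattern.
 *
 *     P9Req *req = v9fs_req_init(v9p, 4 + 8, P9_TGETATTR, tag);
 *     v9fs_uint32_write(req, fid);             // fill the message body
 *     v9fs_uint64_write(req, P9_GETATTR_ALL);
 *     v9fs_req_send(req);                      // queue T-message + R-buffer
 *     v9fs_req_wait_for_reply(req, NULL);      // wait for the used element
 *     v9fs_req_recv(req, P9_RGETATTR);         // check size, tag and id
 *     ...read the reply body with v9fs_*_read()...
 *     v9fs_req_free(req);                      // release guest buffers
 */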

/* size[4] Rlerror tag[2] ecode[4] */
void v9fs_rlerror(P9Req *req, uint32_t *err)
{
    v9fs_req_recv(req, P9_RLERROR);
    v9fs_uint32_read(req, err);
    v9fs_req_free(req);
}

/* size[4] Tversion tag[2] msize[4] version[s] */
TVersionRes v9fs_tversion(TVersionOpt opt)
{
    P9Req *req;
    uint32_t err;
    uint32_t body_size = 4;
    uint16_t string_size;
    uint16_t server_len;
    g_autofree char *server_version = NULL;

    g_assert(opt.client);

    if (!opt.msize) {
        opt.msize = P9_MAX_SIZE;
    }

    if (!opt.tag) {
        opt.tag = P9_NOTAG;
    }

    if (!opt.version) {
        opt.version = "9P2000.L";
    }

    string_size = v9fs_string_size(opt.version);
    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;
    req = v9fs_req_init(opt.client, body_size, P9_TVERSION, opt.tag);

    v9fs_uint32_write(req, opt.msize);
    v9fs_string_write(req, opt.version);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rversion(req, &server_len, &server_version);
            g_assert_cmpmem(server_version, server_len,
                            opt.version, strlen(opt.version));
        }
        req = NULL; /* request was freed */
    }

    return (TVersionRes) {
        .req = req,
    };
}
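
/*
 * Typical usage (illustrative): negotiate the protocol version with all
 * defaults, i.e. msize = P9_MAX_SIZE, tag = P9_NOTAG and version "9P2000.L".
 * The v9p pointer is assumed to come from the test's virtio-9p setup.
 *
 *     v9fs_tversion((TVersionOpt) { .client = v9p });
 */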

/* size[4] Rversion tag[2] msize[4] version[s] */
void v9fs_rversion(P9Req *req, uint16_t *len, char **version)
{
    uint32_t msize;

    v9fs_req_recv(req, P9_RVERSION);
    v9fs_uint32_read(req, &msize);

    g_assert_cmpint(msize, ==, P9_MAX_SIZE);

    if (len || version) {
        v9fs_string_read(req, len, version);
    }

    v9fs_req_free(req);
}

/* size[4] Tattach tag[2] fid[4] afid[4] uname[s] aname[s] n_uname[4] */
TAttachRes v9fs_tattach(TAttachOpt opt)
{
    uint32_t err;
    const char *uname = ""; /* ignored by QEMU */
    const char *aname = ""; /* ignored by QEMU */

    g_assert(opt.client);
    /* expecting either Rattach or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !opt.rattach.qid);

    if (!opt.requestOnly) {
        v9fs_tversion((TVersionOpt) { .client = opt.client });
    }

    if (!opt.n_uname) {
        opt.n_uname = getuid();
    }

    P9Req *req = v9fs_req_init(opt.client, 4 + 4 + 2 + 2 + 4, P9_TATTACH,
                               opt.tag);

    v9fs_uint32_write(req, opt.fid);
    v9fs_uint32_write(req, P9_NOFID);
    v9fs_string_write(req, uname);
    v9fs_string_write(req, aname);
    v9fs_uint32_write(req, opt.n_uname);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rattach(req, opt.rattach.qid);
        }
        req = NULL; /* request was freed */
    }

    return (TAttachRes) {
        .req = req,
    };
}
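
/*
 * Typical usage (illustrative): attach to the export root and retrieve its
 * qid. The fid value 0 and the qid variable are hypothetical; n_uname
 * defaults to getuid() as implemented above.
 *
 *     v9fs_qid root_qid;
 *     v9fs_tattach((TAttachOpt) {
 *         .client = v9p, .fid = 0, .rattach.qid = &root_qid
 *     });
 */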

/* size[4] Rattach tag[2] qid[13] */
void v9fs_rattach(P9Req *req, v9fs_qid *qid)
{
    v9fs_req_recv(req, P9_RATTACH);
    if (qid) {
        v9fs_memread(req, qid, 13);
    }
    v9fs_req_free(req);
}

/* size[4] Twalk tag[2] fid[4] newfid[4] nwname[2] nwname*(wname[s]) */
TWalkRes v9fs_twalk(TWalkOpt opt)
{
    P9Req *req;
    int i;
    uint32_t body_size = 4 + 4 + 2;
    uint32_t err;
    char **wnames = NULL;

    g_assert(opt.client);
    /* expecting either high- or low-level path, but not both */
    g_assert(!opt.path || !(opt.nwname || opt.wnames));
    /* expecting either Rwalk or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !(opt.rwalk.nwqid || opt.rwalk.wqid));

    if (!opt.newfid) {
        opt.newfid = genfid();
    }

    if (opt.path) {
        opt.nwname = split(opt.path, "/", &wnames);
        opt.wnames = wnames;
    }

    for (i = 0; i < opt.nwname; i++) {
        uint16_t wname_size = v9fs_string_size(opt.wnames[i]);

        g_assert_cmpint(body_size, <=, UINT32_MAX - wname_size);
        body_size += wname_size;
    }
    req = v9fs_req_init(opt.client, body_size, P9_TWALK, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_uint32_write(req, opt.newfid);
    v9fs_uint16_write(req, opt.nwname);
    for (i = 0; i < opt.nwname; i++) {
        v9fs_string_write(req, opt.wnames[i]);
    }
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rwalk(req, opt.rwalk.nwqid, opt.rwalk.wqid);
        }
        req = NULL; /* request was freed */
    }

    split_free(&wnames);

    return (TWalkRes) {
        .newfid = opt.newfid,
        .req = req,
    };
}
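
/*
 * Typical usage (illustrative): walk from the attached root fid (fid 0 by
 * default) to a path and let the client auto-generate the new fid. The path
 * is hypothetical.
 *
 *     uint32_t fid = v9fs_twalk((TWalkOpt) {
 *         .client = v9p, .path = "subdir/file"
 *     }).newfid;
 */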

/* size[4] Rwalk tag[2] nwqid[2] nwqid*(wqid[13]) */
void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid)
{
    uint16_t local_nwqid;

    v9fs_req_recv(req, P9_RWALK);
    v9fs_uint16_read(req, &local_nwqid);
    if (nwqid) {
        *nwqid = local_nwqid;
    }
    if (wqid) {
        *wqid = g_malloc(local_nwqid * 13);
        v9fs_memread(req, *wqid, local_nwqid * 13);
    }
    v9fs_req_free(req);
}

/* size[4] Tgetattr tag[2] fid[4] request_mask[8] */
TGetAttrRes v9fs_tgetattr(TGetAttrOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either Rgetattr or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !opt.rgetattr.attr);

    if (!opt.request_mask) {
        opt.request_mask = P9_GETATTR_ALL;
    }

    req = v9fs_req_init(opt.client, 4 + 8, P9_TGETATTR, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_uint64_write(req, opt.request_mask);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rgetattr(req, opt.rgetattr.attr);
        }
        req = NULL; /* request was freed */
    }

    return (TGetAttrRes) { .req = req };
}
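
/*
 * Typical usage (illustrative): query the attributes of a fid obtained from a
 * previous v9fs_twalk() call; request_mask defaults to P9_GETATTR_ALL as
 * implemented above.
 *
 *     v9fs_attr attr;
 *     v9fs_tgetattr((TGetAttrOpt) {
 *         .client = v9p, .fid = fid, .rgetattr.attr = &attr
 *     });
 */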

/*
 * size[4] Rgetattr tag[2] valid[8] qid[13] mode[4] uid[4] gid[4] nlink[8]
 *                  rdev[8] size[8] blksize[8] blocks[8]
 *                  atime_sec[8] atime_nsec[8] mtime_sec[8] mtime_nsec[8]
 *                  ctime_sec[8] ctime_nsec[8] btime_sec[8] btime_nsec[8]
 *                  gen[8] data_version[8]
 */
void v9fs_rgetattr(P9Req *req, v9fs_attr *attr)
{
    v9fs_req_recv(req, P9_RGETATTR);

    v9fs_uint64_read(req, &attr->valid);
    v9fs_memread(req, &attr->qid, 13);
    v9fs_uint32_read(req, &attr->mode);
    v9fs_uint32_read(req, &attr->uid);
    v9fs_uint32_read(req, &attr->gid);
    v9fs_uint64_read(req, &attr->nlink);
    v9fs_uint64_read(req, &attr->rdev);
    v9fs_uint64_read(req, &attr->size);
    v9fs_uint64_read(req, &attr->blksize);
    v9fs_uint64_read(req, &attr->blocks);
    v9fs_uint64_read(req, &attr->atime_sec);
    v9fs_uint64_read(req, &attr->atime_nsec);
    v9fs_uint64_read(req, &attr->mtime_sec);
    v9fs_uint64_read(req, &attr->mtime_nsec);
    v9fs_uint64_read(req, &attr->ctime_sec);
    v9fs_uint64_read(req, &attr->ctime_nsec);
    v9fs_uint64_read(req, &attr->btime_sec);
    v9fs_uint64_read(req, &attr->btime_nsec);
    v9fs_uint64_read(req, &attr->gen);
    v9fs_uint64_read(req, &attr->data_version);

    v9fs_req_free(req);
}

/* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */
TReadDirRes v9fs_treaddir(TReadDirOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either Rreaddir or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !(opt.rreaddir.count ||
             opt.rreaddir.nentries || opt.rreaddir.entries));

    req = v9fs_req_init(opt.client, 4 + 8 + 4, P9_TREADDIR, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_uint64_write(req, opt.offset);
    v9fs_uint32_write(req, opt.count);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rreaddir(req, opt.rreaddir.count, opt.rreaddir.nentries,
                          opt.rreaddir.entries);
        }
        req = NULL; /* request was freed */
    }

    return (TReadDirRes) { .req = req };
}

/* size[4] Rreaddir tag[2] count[4] data[count] */
void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries,
                   struct V9fsDirent **entries)
{
    uint32_t local_count;
    struct V9fsDirent *e = NULL;
    /* only used to avoid a leak if entries was NULL */
    struct V9fsDirent *unused_entries = NULL;
    uint16_t slen;
    uint32_t n = 0;

    v9fs_req_recv(req, P9_RREADDIR);
    v9fs_uint32_read(req, &local_count);

    if (count) {
        *count = local_count;
    }

    for (int32_t togo = (int32_t)local_count;
         togo >= 13 + 8 + 1 + 2;
         togo -= 13 + 8 + 1 + 2 + slen, ++n)
    {
        if (!e) {
            e = g_new(struct V9fsDirent, 1);
            if (entries) {
                *entries = e;
            } else {
                unused_entries = e;
            }
        } else {
            e = e->next = g_new(struct V9fsDirent, 1);
        }
        e->next = NULL;
        /* qid[13] offset[8] type[1] name[s] */
        v9fs_memread(req, &e->qid, 13);
        v9fs_uint64_read(req, &e->offset);
        v9fs_uint8_read(req, &e->type);
        v9fs_string_read(req, &slen, &e->name);
    }

    if (nentries) {
        *nentries = n;
    }

    v9fs_free_dirents(unused_entries);
    v9fs_req_free(req);
}

void v9fs_free_dirents(struct V9fsDirent *e)
{
    struct V9fsDirent *next = NULL;

    for (; e; e = next) {
        next = e->next;
        g_free(e->name);
        g_free(e);
    }
}
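
/*
 * Typical usage (illustrative): read a directory that was walked to and
 * opened beforehand, then release the returned entry list. The fid and the
 * count of 4096 bytes are hypothetical.
 *
 *     uint32_t count, nentries;
 *     struct V9fsDirent *entries = NULL;
 *     v9fs_treaddir((TReadDirOpt) {
 *         .client = v9p, .fid = fid, .offset = 0, .count = 4096,
 *         .rreaddir = {
 *             .count = &count, .nentries = &nentries, .entries = &entries
 *         }
 *     });
 *     v9fs_free_dirents(entries);
 */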

/* size[4] Tlopen tag[2] fid[4] flags[4] */
TLOpenRes v9fs_tlopen(TLOpenOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either Rlopen or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !(opt.rlopen.qid || opt.rlopen.iounit));

    req = v9fs_req_init(opt.client, 4 + 4, P9_TLOPEN, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_uint32_write(req, opt.flags);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rlopen(req, opt.rlopen.qid, opt.rlopen.iounit);
        }
        req = NULL; /* request was freed */
    }

    return (TLOpenRes) { .req = req };
}

/* size[4] Rlopen tag[2] qid[13] iounit[4] */
void v9fs_rlopen(P9Req *req, v9fs_qid *qid, uint32_t *iounit)
{
    v9fs_req_recv(req, P9_RLOPEN);
    if (qid) {
        v9fs_memread(req, qid, 13);
    } else {
        v9fs_memskip(req, 13);
    }
    if (iounit) {
        v9fs_uint32_read(req, iounit);
    }
    v9fs_req_free(req);
}

/* size[4] Twrite tag[2] fid[4] offset[8] count[4] data[count] */
TWriteRes v9fs_twrite(TWriteOpt opt)
{
    P9Req *req;
    uint32_t err;
    uint32_t body_size = 4 + 8 + 4;
    uint32_t written = 0;

    g_assert(opt.client);

    g_assert_cmpint(body_size, <=, UINT32_MAX - opt.count);
    body_size += opt.count;
    req = v9fs_req_init(opt.client, body_size, P9_TWRITE, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_uint64_write(req, opt.offset);
    v9fs_uint32_write(req, opt.count);
    v9fs_memwrite(req, opt.data, opt.count);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rwrite(req, &written);
        }
        req = NULL; /* request was freed */
    }

    return (TWriteRes) {
        .req = req,
        .count = written
    };
}

/* size[4] Rwrite tag[2] count[4] */
void v9fs_rwrite(P9Req *req, uint32_t *count)
{
    v9fs_req_recv(req, P9_RWRITE);
    if (count) {
        v9fs_uint32_read(req, count);
    }
    v9fs_req_free(req);
}
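
/*
 * Typical usage (illustrative): open a previously walked-to fid for writing
 * and write a small buffer to it. The fid and the data buffer are
 * hypothetical.
 *
 *     char buf[] = "hello";
 *     v9fs_tlopen((TLOpenOpt) { .client = v9p, .fid = fid,
 *                               .flags = O_WRONLY });
 *     uint32_t written = v9fs_twrite((TWriteOpt) {
 *         .client = v9p, .fid = fid, .offset = 0,
 *         .count = sizeof(buf), .data = buf
 *     }).count;
 */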

/* size[4] Tflush tag[2] oldtag[2] */
TFlushRes v9fs_tflush(TFlushOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);

    req = v9fs_req_init(opt.client, 2, P9_TFLUSH, opt.tag);
    /* oldtag[2] is only 2 bytes on the wire, matching the body size above */
    v9fs_uint16_write(req, opt.oldtag);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rflush(req);
        }
        req = NULL; /* request was freed */
    }

    return (TFlushRes) { .req = req };
}

/* size[4] Rflush tag[2] */
void v9fs_rflush(P9Req *req)
{
    v9fs_req_recv(req, P9_RFLUSH);
    v9fs_req_free(req);
}
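
/*
 * Typical usage (illustrative): Tflush is normally paired with a request that
 * was sent with .requestOnly = true, so the test can try to cancel it while
 * it is still in flight. The tag values and write parameters are
 * hypothetical.
 *
 *     P9Req *wreq = v9fs_twrite((TWriteOpt) {
 *         .client = v9p, .fid = fid, .offset = 0, .count = count,
 *         .data = buf, .tag = 0, .requestOnly = true
 *     }).req;
 *     P9Req *freq = v9fs_tflush((TFlushOpt) {
 *         .client = v9p, .oldtag = 0, .tag = 1, .requestOnly = true
 *     }).req;
 *     // ...wait for both replies, then receive and free both requests...
 */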

/* size[4] Tmkdir tag[2] dfid[4] name[s] mode[4] gid[4] */
TMkdirRes v9fs_tmkdir(TMkdirOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either hi-level atPath or low-level dfid, but not both */
    g_assert(!opt.atPath || !opt.dfid);
    /* expecting either Rmkdir or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !opt.rmkdir.qid);

    if (opt.atPath) {
        opt.dfid = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                           .path = opt.atPath }).newfid;
    }

    if (!opt.mode) {
        opt.mode = 0750;
    }

    uint32_t body_size = 4 + 4 + 4;
    uint16_t string_size = v9fs_string_size(opt.name);

    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;

    req = v9fs_req_init(opt.client, body_size, P9_TMKDIR, opt.tag);
    v9fs_uint32_write(req, opt.dfid);
    v9fs_string_write(req, opt.name);
    v9fs_uint32_write(req, opt.mode);
    v9fs_uint32_write(req, opt.gid);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rmkdir(req, opt.rmkdir.qid);
        }
        req = NULL; /* request was freed */
    }

    return (TMkdirRes) { .req = req };
}

/* size[4] Rmkdir tag[2] qid[13] */
void v9fs_rmkdir(P9Req *req, v9fs_qid *qid)
{
    v9fs_req_recv(req, P9_RMKDIR);
    if (qid) {
        v9fs_memread(req, qid, 13);
    } else {
        v9fs_memskip(req, 13);
    }
    v9fs_req_free(req);
}
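
/*
 * Typical usage (illustrative): create a directory relative to a path by
 * using the high-level atPath variant; mode defaults to 0750 as implemented
 * above. The path and directory name are hypothetical.
 *
 *     v9fs_tmkdir((TMkdirOpt) {
 *         .client = v9p, .atPath = "/", .name = "mydir"
 *     });
 */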

/* size[4] Tlcreate tag[2] fid[4] name[s] flags[4] mode[4] gid[4] */
TlcreateRes v9fs_tlcreate(TlcreateOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either hi-level atPath or low-level fid, but not both */
    g_assert(!opt.atPath || !opt.fid);
    /* expecting either Rlcreate or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !(opt.rlcreate.qid || opt.rlcreate.iounit));

    if (opt.atPath) {
        opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                          .path = opt.atPath }).newfid;
    }

    if (!opt.mode) {
        opt.mode = 0750;
    }

    uint32_t body_size = 4 + 4 + 4 + 4;
    uint16_t string_size = v9fs_string_size(opt.name);

    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;

    req = v9fs_req_init(opt.client, body_size, P9_TLCREATE, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_string_write(req, opt.name);
    v9fs_uint32_write(req, opt.flags);
    v9fs_uint32_write(req, opt.mode);
    v9fs_uint32_write(req, opt.gid);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rlcreate(req, opt.rlcreate.qid, opt.rlcreate.iounit);
        }
        req = NULL; /* request was freed */
    }

    return (TlcreateRes) { .req = req };
}

/* size[4] Rlcreate tag[2] qid[13] iounit[4] */
void v9fs_rlcreate(P9Req *req, v9fs_qid *qid, uint32_t *iounit)
{
    v9fs_req_recv(req, P9_RLCREATE);
    if (qid) {
        v9fs_memread(req, qid, 13);
    } else {
        v9fs_memskip(req, 13);
    }
    if (iounit) {
        v9fs_uint32_read(req, iounit);
    }
    v9fs_req_free(req);
}

/* size[4] Tsymlink tag[2] fid[4] name[s] symtgt[s] gid[4] */
TsymlinkRes v9fs_tsymlink(TsymlinkOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either hi-level atPath or low-level fid, but not both */
    g_assert(!opt.atPath || !opt.fid);
    /* expecting either Rsymlink or Rlerror, but obviously not both */
    g_assert(!opt.expectErr || !opt.rsymlink.qid);

    if (opt.atPath) {
        opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                          .path = opt.atPath }).newfid;
    }

    uint32_t body_size = 4 + 4;
    uint16_t string_size = v9fs_string_size(opt.name) +
                           v9fs_string_size(opt.symtgt);

    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;

    req = v9fs_req_init(opt.client, body_size, P9_TSYMLINK, opt.tag);
    v9fs_uint32_write(req, opt.fid);
    v9fs_string_write(req, opt.name);
    v9fs_string_write(req, opt.symtgt);
    v9fs_uint32_write(req, opt.gid);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rsymlink(req, opt.rsymlink.qid);
        }
        req = NULL; /* request was freed */
    }

    return (TsymlinkRes) { .req = req };
}

/* size[4] Rsymlink tag[2] qid[13] */
void v9fs_rsymlink(P9Req *req, v9fs_qid *qid)
{
    v9fs_req_recv(req, P9_RSYMLINK);
    if (qid) {
        v9fs_memread(req, qid, 13);
    } else {
        v9fs_memskip(req, 13);
    }
    v9fs_req_free(req);
}

/* size[4] Tlink tag[2] dfid[4] fid[4] name[s] */
TlinkRes v9fs_tlink(TlinkOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either hi-level atPath or low-level dfid, but not both */
    g_assert(!opt.atPath || !opt.dfid);
    /* expecting either hi-level toPath or low-level fid, but not both */
    g_assert(!opt.toPath || !opt.fid);

    if (opt.atPath) {
        opt.dfid = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                           .path = opt.atPath }).newfid;
    }
    if (opt.toPath) {
        opt.fid = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                          .path = opt.toPath }).newfid;
    }

    uint32_t body_size = 4 + 4;
    uint16_t string_size = v9fs_string_size(opt.name);

    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;

    req = v9fs_req_init(opt.client, body_size, P9_TLINK, opt.tag);
    v9fs_uint32_write(req, opt.dfid);
    v9fs_uint32_write(req, opt.fid);
    v9fs_string_write(req, opt.name);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_rlink(req);
        }
        req = NULL; /* request was freed */
    }

    return (TlinkRes) { .req = req };
}

/* size[4] Rlink tag[2] */
void v9fs_rlink(P9Req *req)
{
    v9fs_req_recv(req, P9_RLINK);
    v9fs_req_free(req);
}

/* size[4] Tunlinkat tag[2] dirfd[4] name[s] flags[4] */
TunlinkatRes v9fs_tunlinkat(TunlinkatOpt opt)
{
    P9Req *req;
    uint32_t err;

    g_assert(opt.client);
    /* expecting either hi-level atPath or low-level dirfd, but not both */
    g_assert(!opt.atPath || !opt.dirfd);

    if (opt.atPath) {
        opt.dirfd = v9fs_twalk((TWalkOpt) { .client = opt.client,
                                            .path = opt.atPath }).newfid;
    }

    uint32_t body_size = 4 + 4;
    uint16_t string_size = v9fs_string_size(opt.name);

    g_assert_cmpint(body_size, <=, UINT32_MAX - string_size);
    body_size += string_size;

    req = v9fs_req_init(opt.client, body_size, P9_TUNLINKAT, opt.tag);
    v9fs_uint32_write(req, opt.dirfd);
    v9fs_string_write(req, opt.name);
    v9fs_uint32_write(req, opt.flags);
    v9fs_req_send(req);

    if (!opt.requestOnly) {
        v9fs_req_wait_for_reply(req, NULL);
        if (opt.expectErr) {
            v9fs_rlerror(req, &err);
            g_assert_cmpint(err, ==, opt.expectErr);
        } else {
            v9fs_runlinkat(req);
        }
        req = NULL; /* request was freed */
    }

    return (TunlinkatRes) { .req = req };
}

/* size[4] Runlinkat tag[2] */
void v9fs_runlinkat(P9Req *req)
{
    v9fs_req_recv(req, P9_RUNLINKAT);
    v9fs_req_free(req);
}

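/*
 * Typical usage (illustrative): remove a previously created file relative to
 * its parent directory, e.g. to clean up at the end of a test. The path and
 * file name are hypothetical.
 *
 *     v9fs_tunlinkat((TunlinkatOpt) {
 *         .client = v9p, .atPath = "/", .name = "myfile"
 *     });
 */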