xref: /openbmc/qemu/hw/9pfs/9p.c (revision 6c187695)
1 /*
2  * Virtio 9p backend
3  *
4  * Copyright IBM, Corp. 2010
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 /*
15  * Not so fast! You might want to read the 9p developer docs first:
16  * https://wiki.qemu.org/Documentation/9p
17  */
18 
19 #include "qemu/osdep.h"
20 #ifdef CONFIG_LINUX
21 #include <linux/limits.h>
22 #else
23 #include <limits.h>
24 #endif
25 #include <glib/gprintf.h>
26 #include "hw/virtio/virtio.h"
27 #include "qapi/error.h"
28 #include "qemu/error-report.h"
29 #include "qemu/iov.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/sockets.h"
32 #include "virtio-9p.h"
33 #include "fsdev/qemu-fsdev.h"
34 #include "9p-xattr.h"
35 #include "9p-util.h"
36 #include "coth.h"
37 #include "trace.h"
38 #include "migration/blocker.h"
39 #include "qemu/xxhash.h"
40 #include <math.h>
41 
42 int open_fd_hw;
43 int total_open_fd;
44 static int open_fd_rc;
45 
46 enum {
47     Oread   = 0x00,
48     Owrite  = 0x01,
49     Ordwr   = 0x02,
50     Oexec   = 0x03,
51     Oexcl   = 0x04,
52     Otrunc  = 0x10,
53     Orexec  = 0x20,
54     Orclose = 0x40,
55     Oappend = 0x80,
56 };
57 
58 P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free);
59 
60 static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
61 {
62     ssize_t ret;
63     va_list ap;
64 
65     va_start(ap, fmt);
66     ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
67     va_end(ap);
68 
69     return ret;
70 }
71 
72 static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
73 {
74     ssize_t ret;
75     va_list ap;
76 
77     va_start(ap, fmt);
78     ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
79     va_end(ap);
80 
81     return ret;
82 }
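/*
 * The format strings passed to pdu_marshal()/pdu_unmarshal() are interpreted
 * by the transport's pdu_vmarshal()/pdu_vunmarshal() implementation. As used
 * throughout this file, the conversion characters roughly are: 'b' = 8 bit,
 * 'w' = 16 bit, 'd' = 32 bit and 'q' = 64 bit integers, 's' = V9fsString,
 * 'Q' = V9fsQID, 'S' = V9fsStat, 'A' = V9fsStatDotl and 'I' = V9fsIattr.
 * For example, the 7 byte header of every reply is written below with
 * pdu_marshal(pdu, 0, "dbw", size, id, tag).
 */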
83 
84 static int omode_to_uflags(int8_t mode)
85 {
86     int ret = 0;
87 
88     switch (mode & 3) {
89     case Oread:
90         ret = O_RDONLY;
91         break;
92     case Ordwr:
93         ret = O_RDWR;
94         break;
95     case Owrite:
96         ret = O_WRONLY;
97         break;
98     case Oexec:
99         ret = O_RDONLY;
100         break;
101     }
102 
103     if (mode & Otrunc) {
104         ret |= O_TRUNC;
105     }
106 
107     if (mode & Oappend) {
108         ret |= O_APPEND;
109     }
110 
111     if (mode & Oexcl) {
112         ret |= O_EXCL;
113     }
114 
115     return ret;
116 }
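/*
 * Example: a 9P2000.u Topen mode of (Ordwr | Otrunc), i.e. 0x12, maps to
 * O_RDWR | O_TRUNC here; Orexec and Orclose have no host equivalent and are
 * simply ignored by this function.
 */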
117 
118 typedef struct DotlOpenflagMap {
119     int dotl_flag;
120     int open_flag;
121 } DotlOpenflagMap;
122 
123 static int dotl_to_open_flags(int flags)
124 {
125     int i;
126     /*
127      * We have the same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
128      * and P9_DOTL_NOACCESS
129      */
130     int oflags = flags & O_ACCMODE;
131 
132     DotlOpenflagMap dotl_oflag_map[] = {
133         { P9_DOTL_CREATE, O_CREAT },
134         { P9_DOTL_EXCL, O_EXCL },
135         { P9_DOTL_NOCTTY, O_NOCTTY },
136         { P9_DOTL_TRUNC, O_TRUNC },
137         { P9_DOTL_APPEND, O_APPEND },
138         { P9_DOTL_NONBLOCK, O_NONBLOCK },
139         { P9_DOTL_DSYNC, O_DSYNC },
140         { P9_DOTL_FASYNC, FASYNC },
141 #ifndef CONFIG_DARWIN
142         { P9_DOTL_NOATIME, O_NOATIME },
143         /*
144          *  On Darwin, we could map to F_NOCACHE, which is
145          *  similar, but doesn't quite have the same
146          *  semantics. However, we don't support O_DIRECT
147          *  even on linux at the moment, so we just ignore
148          *  it here.
149          */
150         { P9_DOTL_DIRECT, O_DIRECT },
151 #endif
152         { P9_DOTL_LARGEFILE, O_LARGEFILE },
153         { P9_DOTL_DIRECTORY, O_DIRECTORY },
154         { P9_DOTL_NOFOLLOW, O_NOFOLLOW },
155         { P9_DOTL_SYNC, O_SYNC },
156     };
157 
158     for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
159         if (flags & dotl_oflag_map[i].dotl_flag) {
160             oflags |= dotl_oflag_map[i].open_flag;
161         }
162     }
163 
164     return oflags;
165 }
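/*
 * Example: assuming the 9P2000.L access mode bits match the host's O_ACCMODE
 * values (see the comment above), a Tlopen with flags
 * (P9_DOTL_WRONLY | P9_DOTL_CREATE | P9_DOTL_TRUNC) is translated to
 * O_WRONLY | O_CREAT | O_TRUNC here; get_dotl_openflags() below then strips
 * O_CREAT (and O_NOCTTY / O_ASYNC) again for plain opens.
 */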
166 
167 void cred_init(FsCred *credp)
168 {
169     credp->fc_uid = -1;
170     credp->fc_gid = -1;
171     credp->fc_mode = -1;
172     credp->fc_rdev = -1;
173 }
174 
175 static int get_dotl_openflags(V9fsState *s, int oflags)
176 {
177     int flags;
178     /*
179      * Filter the client open flags
180      */
181     flags = dotl_to_open_flags(oflags);
182     flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT);
183 #ifndef CONFIG_DARWIN
184     /*
185      * Ignore direct disk access hint until the server supports it.
186      */
187     flags &= ~O_DIRECT;
188 #endif
189     return flags;
190 }
191 
192 void v9fs_path_init(V9fsPath *path)
193 {
194     path->data = NULL;
195     path->size = 0;
196 }
197 
198 void v9fs_path_free(V9fsPath *path)
199 {
200     g_free(path->data);
201     path->data = NULL;
202     path->size = 0;
203 }
204 
205 
206 void G_GNUC_PRINTF(2, 3)
207 v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
208 {
209     va_list ap;
210 
211     v9fs_path_free(path);
212 
213     va_start(ap, fmt);
214     /* Bump the size to include the terminating NULL */
215     path->size = g_vasprintf(&path->data, fmt, ap) + 1;
216     va_end(ap);
217 }
218 
219 void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src)
220 {
221     v9fs_path_free(dst);
222     dst->size = src->size;
223     dst->data = g_memdup(src->data, src->size);
224 }
225 
226 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
227                       const char *name, V9fsPath *path)
228 {
229     int err;
230     err = s->ops->name_to_path(&s->ctx, dirpath, name, path);
231     if (err < 0) {
232         err = -errno;
233     }
234     return err;
235 }
236 
237 /*
238  * Return TRUE if s1 is an ancestor of s2.
239  *
240  * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
241  * As a special case, we treat s1 as an ancestor of s2 if they are the same.
242  */
243 static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2)
244 {
245     if (!strncmp(s1->data, s2->data, s1->size - 1)) {
246         if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') {
247             return 1;
248         }
249     }
250     return 0;
251 }
252 
253 static size_t v9fs_string_size(V9fsString *str)
254 {
255     return str->size;
256 }
257 
258 /*
259  * Returns 0 if the fid got re-opened, 1 if not, < 0 on error. */
260 static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
261 {
262     int err = 1;
263     if (f->fid_type == P9_FID_FILE) {
264         if (f->fs.fd == -1) {
265             do {
266                 err = v9fs_co_open(pdu, f, f->open_flags);
267             } while (err == -EINTR && !pdu->cancelled);
268         }
269     } else if (f->fid_type == P9_FID_DIR) {
270         if (f->fs.dir.stream == NULL) {
271             do {
272                 err = v9fs_co_opendir(pdu, f);
273             } while (err == -EINTR && !pdu->cancelled);
274         }
275     }
276     return err;
277 }
278 
279 static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
280 {
281     int err;
282     V9fsFidState *f;
283     V9fsState *s = pdu->s;
284 
285     QSIMPLEQ_FOREACH(f, &s->fid_list, next) {
286         BUG_ON(f->clunked);
287         if (f->fid == fid) {
288             /*
289              * Update the fid ref upfront so that
290              * we don't get reclaimed when we yield
291              * in open later.
292              */
293             f->ref++;
294             /*
295              * check whether we need to reopen the
296              * file. We might have closed the fd
297              * while trying to free up some file
298              * descriptors.
299              */
300             err = v9fs_reopen_fid(pdu, f);
301             if (err < 0) {
302                 f->ref--;
303                 return NULL;
304             }
305             /*
306              * Mark the fid as referenced so that the LRU
307              * reclaim won't close the file descriptor
308              */
309             f->flags |= FID_REFERENCED;
310             return f;
311         }
312     }
313     return NULL;
314 }
315 
316 static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
317 {
318     V9fsFidState *f;
319 
320     QSIMPLEQ_FOREACH(f, &s->fid_list, next) {
321         /* If fid is already there return NULL */
322         BUG_ON(f->clunked);
323         if (f->fid == fid) {
324             return NULL;
325         }
326     }
327     f = g_new0(V9fsFidState, 1);
328     f->fid = fid;
329     f->fid_type = P9_FID_NONE;
330     f->ref = 1;
331     /*
332      * Mark the fid as referenced so that the LRU
333      * reclaim won't close the file descriptor
334      */
335     f->flags |= FID_REFERENCED;
336     QSIMPLEQ_INSERT_TAIL(&s->fid_list, f, next);
337 
338     v9fs_readdir_init(s->proto_version, &f->fs.dir);
339     v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir);
340 
341     return f;
342 }
343 
344 static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
345 {
346     int retval = 0;
347 
348     if (fidp->fs.xattr.xattrwalk_fid) {
349         /* getxattr/listxattr fid */
350         goto free_value;
351     }
352     /*
353      * If this is a fid for setxattr, clunk should
354      * result in a local setxattr call.
355      */
356     if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
357         /* clunk after partial write */
358         retval = -EINVAL;
359         goto free_out;
360     }
361     if (fidp->fs.xattr.len) {
362         retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
363                                    fidp->fs.xattr.value,
364                                    fidp->fs.xattr.len,
365                                    fidp->fs.xattr.flags);
366     } else {
367         retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
368     }
369 free_out:
370     v9fs_string_free(&fidp->fs.xattr.name);
371 free_value:
372     g_free(fidp->fs.xattr.value);
373     return retval;
374 }
375 
376 static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
377 {
378     int retval = 0;
379 
380     if (fidp->fid_type == P9_FID_FILE) {
381         /* If we reclaimed the fd no need to close */
382         if (fidp->fs.fd != -1) {
383             retval = v9fs_co_close(pdu, &fidp->fs);
384         }
385     } else if (fidp->fid_type == P9_FID_DIR) {
386         if (fidp->fs.dir.stream != NULL) {
387             retval = v9fs_co_closedir(pdu, &fidp->fs);
388         }
389     } else if (fidp->fid_type == P9_FID_XATTR) {
390         retval = v9fs_xattr_fid_clunk(pdu, fidp);
391     }
392     v9fs_path_free(&fidp->path);
393     g_free(fidp);
394     return retval;
395 }
396 
397 static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
398 {
399     BUG_ON(!fidp->ref);
400     fidp->ref--;
401     /*
402      * Don't free the fid if it is in reclaim list
403      */
404     if (!fidp->ref && fidp->clunked) {
405         if (fidp->fid == pdu->s->root_fid) {
406             /*
407              * If the clunked fid is the root fid then we
408              * have unmounted the fs on the client side, so
409              * delete the migration blocker. Ideally, this
410              * should be hooked to a transport close notification.
411              */
412             if (pdu->s->migration_blocker) {
413                 migrate_del_blocker(pdu->s->migration_blocker);
414                 error_free(pdu->s->migration_blocker);
415                 pdu->s->migration_blocker = NULL;
416             }
417         }
418         return free_fid(pdu, fidp);
419     }
420     return 0;
421 }
422 
423 static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
424 {
425     V9fsFidState *fidp;
426 
427     QSIMPLEQ_FOREACH(fidp, &s->fid_list, next) {
428         if (fidp->fid == fid) {
429             QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next);
430             fidp->clunked = true;
431             return fidp;
432         }
433     }
434     return NULL;
435 }
436 
437 void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
438 {
439     int reclaim_count = 0;
440     V9fsState *s = pdu->s;
441     V9fsFidState *f;
442     QSLIST_HEAD(, V9fsFidState) reclaim_list =
443         QSLIST_HEAD_INITIALIZER(reclaim_list);
444 
445     QSIMPLEQ_FOREACH(f, &s->fid_list, next) {
446         /*
447          * Unlinked fids cannot be reclaimed. Check
448          * for them and skip them. Also skip fids
449          * currently being operated on.
450          */
451         if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
452             continue;
453         }
454         /*
455          * If it is a recently referenced fid,
456          * we leave the fid untouched and clear the
457          * reference bit. We come back to it later
458          * in the next iteration (a simple LRU without
459          * moving list elements around).
460          */
461         if (f->flags & FID_REFERENCED) {
462             f->flags &= ~FID_REFERENCED;
463             continue;
464         }
465         /*
466          * Add fids to reclaim list.
467          */
468         if (f->fid_type == P9_FID_FILE) {
469             if (f->fs.fd != -1) {
470                 /*
471                  * Up the reference count so that
472                  * a clunk request won't free this fid
473                  */
474                 f->ref++;
475                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
476                 f->fs_reclaim.fd = f->fs.fd;
477                 f->fs.fd = -1;
478                 reclaim_count++;
479             }
480         } else if (f->fid_type == P9_FID_DIR) {
481             if (f->fs.dir.stream != NULL) {
482                 /*
483                  * Up the reference count so that
484                  * a clunk request won't free this fid
485                  */
486                 f->ref++;
487                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
488                 f->fs_reclaim.dir.stream = f->fs.dir.stream;
489                 f->fs.dir.stream = NULL;
490                 reclaim_count++;
491             }
492         }
493         if (reclaim_count >= open_fd_rc) {
494             break;
495         }
496     }
497     /*
498      * Now close the fids in the reclaim list. Free them if they
499      * are already clunked.
500      */
501     while (!QSLIST_EMPTY(&reclaim_list)) {
502         f = QSLIST_FIRST(&reclaim_list);
503         QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next);
504         if (f->fid_type == P9_FID_FILE) {
505             v9fs_co_close(pdu, &f->fs_reclaim);
506         } else if (f->fid_type == P9_FID_DIR) {
507             v9fs_co_closedir(pdu, &f->fs_reclaim);
508         }
509         /*
510          * Now drop the fid reference, free it
511          * if clunked.
512          */
513         put_fid(pdu, f);
514     }
515 }
516 
517 static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
518 {
519     int err;
520     V9fsState *s = pdu->s;
521     V9fsFidState *fidp, *fidp_next;
522 
523     fidp = QSIMPLEQ_FIRST(&s->fid_list);
524     if (!fidp) {
525         return 0;
526     }
527 
528     /*
529      * v9fs_reopen_fid() can yield: a reference on the fid must be held
530      * to ensure its pointer remains valid and we can safely pass it to
531      * QSIMPLEQ_NEXT(). The corresponding put_fid() can also yield so
532      * we must keep a reference on the next fid as well. So the logic here
533      * is to get a reference on a fid and only put it back during the next
534      * iteration after we could get a reference on the next fid. Start with
535      * the first one.
536      */
537     for (fidp->ref++; fidp; fidp = fidp_next) {
538         if (fidp->path.size == path->size &&
539             !memcmp(fidp->path.data, path->data, path->size)) {
540             /* Mark the fid non reclaimable. */
541             fidp->flags |= FID_NON_RECLAIMABLE;
542 
543             /* reopen the file/dir if already closed */
544             err = v9fs_reopen_fid(pdu, fidp);
545             if (err < 0) {
546                 put_fid(pdu, fidp);
547                 return err;
548             }
549         }
550 
551         fidp_next = QSIMPLEQ_NEXT(fidp, next);
552 
553         if (fidp_next) {
554             /*
555              * Ensure the next fid survives a potential clunk request during
556              * put_fid() below and v9fs_reopen_fid() in the next iteration.
557              */
558             fidp_next->ref++;
559         }
560 
561         /* We're done with this fid */
562         put_fid(pdu, fidp);
563     }
564 
565     return 0;
566 }
567 
568 static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
569 {
570     V9fsState *s = pdu->s;
571     V9fsFidState *fidp;
572 
573     /* Free all fids */
574     while (!QSIMPLEQ_EMPTY(&s->fid_list)) {
575         /* Get fid */
576         fidp = QSIMPLEQ_FIRST(&s->fid_list);
577         fidp->ref++;
578 
579         /* Clunk fid */
580         QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next);
581         fidp->clunked = true;
582 
583         put_fid(pdu, fidp);
584     }
585 }
586 
587 #define P9_QID_TYPE_DIR         0x80
588 #define P9_QID_TYPE_SYMLINK     0x02
589 
590 #define P9_STAT_MODE_DIR        0x80000000
591 #define P9_STAT_MODE_APPEND     0x40000000
592 #define P9_STAT_MODE_EXCL       0x20000000
593 #define P9_STAT_MODE_MOUNT      0x10000000
594 #define P9_STAT_MODE_AUTH       0x08000000
595 #define P9_STAT_MODE_TMP        0x04000000
596 #define P9_STAT_MODE_SYMLINK    0x02000000
597 #define P9_STAT_MODE_LINK       0x01000000
598 #define P9_STAT_MODE_DEVICE     0x00800000
599 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
600 #define P9_STAT_MODE_SOCKET     0x00100000
601 #define P9_STAT_MODE_SETUID     0x00080000
602 #define P9_STAT_MODE_SETGID     0x00040000
603 #define P9_STAT_MODE_SETVTX     0x00010000
604 
605 #define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR |          \
606                                 P9_STAT_MODE_SYMLINK |      \
607                                 P9_STAT_MODE_LINK |         \
608                                 P9_STAT_MODE_DEVICE |       \
609                                 P9_STAT_MODE_NAMED_PIPE |   \
610                                 P9_STAT_MODE_SOCKET)
611 
612 /* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
613 static inline uint8_t mirror8bit(uint8_t byte)
614 {
615     return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
616 }
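/*
 * This is the well-known "reverse a byte with multiply and modulus" bit
 * twiddling trick: the 64 bit multiplication creates five copies of the byte,
 * the AND mask picks each source bit exactly once, at a position that ends up
 * mirrored after the modulo by 1023 (2^10 - 1) merges the 10-bit groups back
 * into a single byte.
 */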
617 
618 /* Same as mirror8bit(), just for a 64 bit data type instead of a byte. */
619 static inline uint64_t mirror64bit(uint64_t value)
620 {
621     return ((uint64_t)mirror8bit(value         & 0xff) << 56) |
622            ((uint64_t)mirror8bit((value >> 8)  & 0xff) << 48) |
623            ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) |
624            ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) |
625            ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) |
626            ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) |
627            ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8)  |
628            ((uint64_t)mirror8bit((value >> 56) & 0xff));
629 }
630 
631 /*
632  * Parameter k for the Exponential Golomb algorithm to be used.
633  *
634  * The smaller this value, the smaller the minimum bit count of the
635  * generated Exp. Golomb affixes will be (at the lowest index), at the
636  * price of a higher maximum bit count of the generated affixes (at the
637  * highest index). Likewise, increasing this parameter yields a smaller
638  * maximum bit count at the price of a higher minimum bit count.
639  *
640  * In practice that means: a good value for k depends on the expected number
641  * of devices to be exposed by one export. For a small number of devices k
642  * should be small; for a large number of devices k might be increased
643  * instead. The default of k=0 should be fine for most users though.
644  *
645  * IMPORTANT: In case this ever becomes a runtime parameter, the value of
646  * k must not change as long as the guest is still running, because that
647  * would cause completely different inode numbers to be generated on the guest.
648  */
649 #define EXP_GOLOMB_K    0
650 
651 /**
652  * expGolombEncode() - Exponential Golomb algorithm for arbitrary k
653  *                     (including k=0).
654  *
655  * @n: natural number (or index) of the prefix to be generated
656  *     (1, 2, 3, ...)
657  * @k: parameter k of Exp. Golomb algorithm to be used
658  *     (see comment on EXP_GOLOMB_K macro for details about k)
659  * Return: prefix for given @n and @k
660  *
661  * The Exponential Golomb algorithm generates prefixes (NOT suffixes!)
662  * with growing length and with the mathematical property of being
663  * "prefix-free". The latter means the generated prefixes can be prepended
664  * in front of arbitrary numbers and the resulting concatenated numbers are
665  * guaranteed to be always unique.
666  *
667  * This is a minor adjustment to the original Exp. Golomb algorithm in the
668  * sense that lowest allowed index (@n) starts with 1, not with zero.
669  */
670 static VariLenAffix expGolombEncode(uint64_t n, int k)
671 {
672     const uint64_t value = n + (1 << k) - 1;
673     const int bits = (int) log2(value) + 1;
674     return (VariLenAffix) {
675         .type = AffixType_Prefix,
676         .value = value,
677         .bits = bits + MAX((bits - 1 - k), 0)
678     };
679 }
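/*
 * For illustration, with k = 0 (the default EXP_GOLOMB_K) this yields:
 *   n = 1 -> value 1       (1 bit)
 *   n = 2 -> value 0b010   (3 bits)
 *   n = 3 -> value 0b011   (3 bits)
 *   n = 4 -> value 0b00100 (5 bits)
 * i.e. the classic order-0 Exp. Golomb code words, shifted by one index.
 */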
680 
681 /**
682  * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix.
683  * @affix: either suffix or prefix to be inverted
684  * Return: inversion of passed @affix
685  *
686  * Simply mirrors all bits of the affix value, in order to preserve the
687  * mathematical "prefix-free" or "suffix-free" property (respectively)
688  * after the conversion.
689  *
690  * If a passed prefix is suitable to create unique numbers, then the
691  * returned suffix is suitable to create unique numbers as well (and vice
692  * versa).
693  */
694 static VariLenAffix invertAffix(const VariLenAffix *affix)
695 {
696     return (VariLenAffix) {
697         .type =
698             (affix->type == AffixType_Suffix) ?
699                 AffixType_Prefix : AffixType_Suffix,
700         .value =
701             mirror64bit(affix->value) >>
702             ((sizeof(affix->value) * 8) - affix->bits),
703         .bits = affix->bits
704     };
705 }
706 
707 /**
708  * affixForIndex() - Generates suffix numbers with "suffix-free" property.
709  * @index: natural number (or index) of the suffix to be generated
710  *         (1, 2, 3, ...)
711  * Return: Suffix suitable to assemble unique number.
712  *
713  * This is just a wrapper function on top of the Exp. Golomb algorithm.
714  *
715  * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes,
716  * this function converts the Exp. Golomb prefixes into appropriate suffixes
717  * which are still suitable for generating unique numbers.
718  */
719 static VariLenAffix affixForIndex(uint64_t index)
720 {
721     VariLenAffix prefix;
722     prefix = expGolombEncode(index, EXP_GOLOMB_K);
723     return invertAffix(&prefix); /* convert prefix to suffix */
724 }
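/*
 * Continuing the example above, affixForIndex() turns those prefixes into
 * bit-mirrored suffixes of the same length:
 *   index 1 -> suffix 1       (1 bit)
 *   index 2 -> suffix 0b010   (3 bits)
 *   index 3 -> suffix 0b110   (3 bits)
 *   index 4 -> suffix 0b00100 (5 bits)
 * Since the original code words are prefix-free, these suffixes are
 * suffix-free: they can be appended to arbitrary numbers and the concatenated
 * results are guaranteed to differ whenever the suffixes differ.
 */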
725 
726 /* creative abuse of tb_hash_func7, which is based on xxhash */
727 static uint32_t qpp_hash(QppEntry e)
728 {
729     return qemu_xxhash7(e.ino_prefix, e.dev, 0, 0, 0);
730 }
731 
732 static uint32_t qpf_hash(QpfEntry e)
733 {
734     return qemu_xxhash7(e.ino, e.dev, 0, 0, 0);
735 }
736 
737 static bool qpd_cmp_func(const void *obj, const void *userp)
738 {
739     const QpdEntry *e1 = obj, *e2 = userp;
740     return e1->dev == e2->dev;
741 }
742 
743 static bool qpp_cmp_func(const void *obj, const void *userp)
744 {
745     const QppEntry *e1 = obj, *e2 = userp;
746     return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
747 }
748 
749 static bool qpf_cmp_func(const void *obj, const void *userp)
750 {
751     const QpfEntry *e1 = obj, *e2 = userp;
752     return e1->dev == e2->dev && e1->ino == e2->ino;
753 }
754 
755 static void qp_table_remove(void *p, uint32_t h, void *up)
756 {
757     g_free(p);
758 }
759 
760 static void qp_table_destroy(struct qht *ht)
761 {
762     if (!ht || !ht->map) {
763         return;
764     }
765     qht_iter(ht, qp_table_remove, NULL);
766     qht_destroy(ht);
767 }
768 
769 static void qpd_table_init(struct qht *ht)
770 {
771     qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
772 }
773 
774 static void qpp_table_init(struct qht *ht)
775 {
776     qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
777 }
778 
779 static void qpf_table_init(struct qht *ht)
780 {
781     qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
782 }
783 
784 /*
785  * Returns how many (high end) bits of inode numbers of the passed fs
786  * device shall be used (in combination with the device number) to
787  * generate hash values for qpp_table entries.
788  *
789  * This function is required if variable length suffixes are used for inode
790  * number mapping on guest level. Since a device may end up having multiple
791  * entries in qpp_table, each entry most probably with a different suffix
792  * length, we thus need this function in conjunction with qpd_table to
793  * "agree" on a fixed number of bits (per device) to always be used for
794  * generating hash values for the purpose of accessing qpp_table, in order
795  * to get consistent behaviour when accessing qpp_table.
796  */
797 static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev)
798 {
799     QpdEntry lookup = {
800         .dev = dev
801     }, *val;
802     uint32_t hash = dev;
803     VariLenAffix affix;
804 
805     val = qht_lookup(&pdu->s->qpd_table, &lookup, hash);
806     if (!val) {
807         val = g_new0(QpdEntry, 1);
808         *val = lookup;
809         affix = affixForIndex(pdu->s->qp_affix_next);
810         val->prefix_bits = affix.bits;
811         qht_insert(&pdu->s->qpd_table, val, hash, NULL);
812         pdu->s->qp_ndevices++;
813     }
814     return val->prefix_bits;
815 }
816 
817 /*
818  * Slow / full mapping host inode nr -> guest inode nr.
819  *
820  * This function performs a slower and much more costly remapping of an
821  * original file inode number on host to an appropriate different inode
822  * number on guest. For every (dev, inode) combination on host a new
823  * sequential number is generated, cached and exposed as inode number on
824  * guest.
825  *
826  * This is just a "last resort" fallback solution if the much faster/cheaper
827  * qid_path_suffixmap() failed. In practice this slow / full mapping is not
828  * expected ever to be used at all though.
829  *
830  * See qid_path_suffixmap() for details
831  *
832  */
833 static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
834                             uint64_t *path)
835 {
836     QpfEntry lookup = {
837         .dev = stbuf->st_dev,
838         .ino = stbuf->st_ino
839     }, *val;
840     uint32_t hash = qpf_hash(lookup);
841     VariLenAffix affix;
842 
843     val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
844 
845     if (!val) {
846         if (pdu->s->qp_fullpath_next == 0) {
847             /* no more files can be mapped :'( */
848             error_report_once(
849                 "9p: No more prefixes available for remapping inodes from "
850                 "host to guest."
851             );
852             return -ENFILE;
853         }
854 
855         val = g_new0(QpfEntry, 1);
856         *val = lookup;
857 
858         /* new unique inode and device combo */
859         affix = affixForIndex(
860             1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
861         );
862         val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
863         pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
864         qht_insert(&pdu->s->qpf_table, val, hash, NULL);
865     }
866 
867     *path = val->path;
868     return 0;
869 }
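/*
 * Note that the affix used above is the one for index
 * 2^(8 * sizeof(qp_affix_next)), i.e. one past the largest index
 * qid_path_suffixmap() can ever hand out, so inode numbers produced by this
 * full map can never collide with suffix-mapped ones.
 */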
870 
871 /*
872  * Quick mapping host inode nr -> guest inode nr.
873  *
874  * This function performs quick remapping of an original file inode number
875  * on host to an appropriate different inode number on guest. This remapping
876  * of inodes is required to avoid inode nr collisions on guest which would
877  * happen if the 9p export contains more than 1 exported file system (or
878  * more than 1 file system data set), because unlike on host level where the
879  * files would have different device nrs, all files exported by 9p would
880  * share the same device nr on guest (the device nr of the virtual 9p device
881  * that is).
882  *
883  * Inode remapping is performed by chopping off high end bits of the original
884  * inode number from host, shifting the result upwards and then assigning a
885  * generated suffix number for the low end bits, where the same suffix number
886  * will be shared by all inodes with the same device id AND the same high end
887  * bits that have been chopped off. That approach utilizes the fact that inode
888  * numbers very likely share the same high end bits (i.e. due to their common
889  * sequential generation by file systems) and hence we only have to generate
890  * and track a very limited amount of suffixes in practice due to that.
891  *
892  * We generate variable size suffixes for that purpose. The 1st generated
893  * suffix will only have 1 bit and hence we only need to chop off 1 bit from
894  * the original inode number. The subsequent suffixes being generated will
895  * grow in (bit) size subsequently, i.e. the 2nd and 3rd suffix being
896  * generated will have 3 bits and hence we have to chop off 3 bits from their
897  * original inodes, and so on. That approach of using variable length suffixes
898  * (i.e. over fixed size ones) utilizes the fact that in practice only a very
899  * limited number of devices are shared by the same export (e.g. typically
900  * less than 2 dozen devices per 9p export), so in practice we need to chop
901  * off fewer bits than with fixed size prefixes, yet we remain flexible to add
902  * new devices below the host's export directory at any time at runtime,
903  * without having to reboot the guest or reconfigure it for that. And due
904  * to the very limited number of original high end bits that we chop off that
905  * way, the total number of suffixes we need to generate is smaller than with
906  * fixed size prefixes, which also improves performance of the inode
907  * remapping algorithm, and finally has the nice side effect that the inode
908  * numbers on the guest will be much smaller & human friendly. ;-)
909  */
910 static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf,
911                               uint64_t *path)
912 {
913     const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev);
914     QppEntry lookup = {
915         .dev = stbuf->st_dev,
916         .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits))
917     }, *val;
918     uint32_t hash = qpp_hash(lookup);
919 
920     val = qht_lookup(&pdu->s->qpp_table, &lookup, hash);
921 
922     if (!val) {
923         if (pdu->s->qp_affix_next == 0) {
924             /* we ran out of affixes */
925             warn_report_once(
926                 "9p: Potential degraded performance of inode remapping"
927             );
928             return -ENFILE;
929         }
930 
931         val = g_new0(QppEntry, 1);
932         *val = lookup;
933 
934         /* new unique inode affix and device combo */
935         val->qp_affix_index = pdu->s->qp_affix_next++;
936         val->qp_affix = affixForIndex(val->qp_affix_index);
937         qht_insert(&pdu->s->qpp_table, val, hash, NULL);
938     }
939     /* assuming generated affix to be suffix type, not prefix */
940     *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;
941     return 0;
942 }
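/*
 * Worked example (assuming qp_affix_next still is 1, i.e. the very first
 * remapped file of the export): qid_inode_prefix_hash_bits() fixes the
 * device's prefix_bits to affixForIndex(1).bits == 1, the new QppEntry gets
 * the 1-bit suffix {value = 1}, and the guest visible inode number becomes
 *
 *   *path = (stbuf->st_ino << 1) | 1;
 *
 * i.e. the host inode's most significant bit is chopped off and a single
 * 1 bit is appended at the low end.
 */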
943 
944 static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
945 {
946     int err;
947     size_t size;
948 
949     if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
950         /* map inode+device to qid path (fast path) */
951         err = qid_path_suffixmap(pdu, stbuf, &qidp->path);
952         if (err == -ENFILE) {
953             /* fast path didn't work, fall back to full map */
954             err = qid_path_fullmap(pdu, stbuf, &qidp->path);
955         }
956         if (err) {
957             return err;
958         }
959     } else {
960         if (pdu->s->dev_id != stbuf->st_dev) {
961             if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) {
962                 error_report_once(
963                     "9p: Multiple devices detected in same VirtFS export. "
964                     "Access of guest to additional devices is (partly) "
965                     "denied due to virtfs option 'multidevs=forbid' being "
966                     "effective."
967                 );
968                 return -ENODEV;
969             } else {
970                 warn_report_once(
971                     "9p: Multiple devices detected in same VirtFS export, "
972                     "which might lead to file ID collisions and severe "
973                     "misbehaviours on guest! You should either use a "
974                     "separate export for each device shared from host or "
975                     "use virtfs option 'multidevs=remap'!"
976                 );
977             }
978         }
979         memset(&qidp->path, 0, sizeof(qidp->path));
980         size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path));
981         memcpy(&qidp->path, &stbuf->st_ino, size);
982     }
983 
984     qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8);
985     qidp->type = 0;
986     if (S_ISDIR(stbuf->st_mode)) {
987         qidp->type |= P9_QID_TYPE_DIR;
988     }
989     if (S_ISLNK(stbuf->st_mode)) {
990         qidp->type |= P9_QID_TYPE_SYMLINK;
991     }
992 
993     return 0;
994 }
995 
996 V9fsPDU *pdu_alloc(V9fsState *s)
997 {
998     V9fsPDU *pdu = NULL;
999 
1000     if (!QLIST_EMPTY(&s->free_list)) {
1001         pdu = QLIST_FIRST(&s->free_list);
1002         QLIST_REMOVE(pdu, next);
1003         QLIST_INSERT_HEAD(&s->active_list, pdu, next);
1004     }
1005     return pdu;
1006 }
1007 
1008 void pdu_free(V9fsPDU *pdu)
1009 {
1010     V9fsState *s = pdu->s;
1011 
1012     g_assert(!pdu->cancelled);
1013     QLIST_REMOVE(pdu, next);
1014     QLIST_INSERT_HEAD(&s->free_list, pdu, next);
1015 }
1016 
1017 static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
1018 {
1019     int8_t id = pdu->id + 1; /* Response */
1020     V9fsState *s = pdu->s;
1021     int ret;
1022 
1023     /*
1024      * The 9p spec requires that successfully cancelled pdus receive no reply.
1025      * Sending a reply would confuse clients because they would
1026      * assume that any EINTR is the actual result of the operation,
1027      * rather than a consequence of the cancellation. However, if
1028      * the operation completed (successfully or with an error other
1029      * than one caused by cancellation), we do send out that reply, both
1030      * for efficiency and to avoid confusing the rest of the state machine
1031      * that assumes passing a non-error here will mean a successful
1032      * transmission of the reply.
1033      */
1034     bool discard = pdu->cancelled && len == -EINTR;
1035     if (discard) {
1036         trace_v9fs_rcancel(pdu->tag, pdu->id);
1037         pdu->size = 0;
1038         goto out_notify;
1039     }
1040 
1041     if (len < 0) {
1042         int err = -len;
1043         len = 7;
1044 
1045         if (s->proto_version != V9FS_PROTO_2000L) {
1046             V9fsString str;
1047 
1048             str.data = strerror(err);
1049             str.size = strlen(str.data);
1050 
1051             ret = pdu_marshal(pdu, len, "s", &str);
1052             if (ret < 0) {
1053                 goto out_notify;
1054             }
1055             len += ret;
1056             id = P9_RERROR;
1057         } else {
1058             err = errno_to_dotl(err);
1059         }
1060 
1061         ret = pdu_marshal(pdu, len, "d", err);
1062         if (ret < 0) {
1063             goto out_notify;
1064         }
1065         len += ret;
1066 
1067         if (s->proto_version == V9FS_PROTO_2000L) {
1068             id = P9_RLERROR;
1069         }
1070         trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
1071     }
1072 
1073     /* fill out the header */
1074     if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) {
1075         goto out_notify;
1076     }
1077 
1078     /* keep these in sync */
1079     pdu->size = len;
1080     pdu->id = id;
1081 
1082 out_notify:
1083     pdu->s->transport->push_and_notify(pdu);
1084 
1085     /* Now wake up anybody waiting in flush for this request */
1086     if (!qemu_co_queue_next(&pdu->complete)) {
1087         pdu_free(pdu);
1088     }
1089 }
1090 
1091 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
1092 {
1093     mode_t ret;
1094 
1095     ret = mode & 0777;
1096     if (mode & P9_STAT_MODE_DIR) {
1097         ret |= S_IFDIR;
1098     }
1099 
1100     if (mode & P9_STAT_MODE_SYMLINK) {
1101         ret |= S_IFLNK;
1102     }
1103     if (mode & P9_STAT_MODE_SOCKET) {
1104         ret |= S_IFSOCK;
1105     }
1106     if (mode & P9_STAT_MODE_NAMED_PIPE) {
1107         ret |= S_IFIFO;
1108     }
1109     if (mode & P9_STAT_MODE_DEVICE) {
1110         if (extension->size && extension->data[0] == 'c') {
1111             ret |= S_IFCHR;
1112         } else {
1113             ret |= S_IFBLK;
1114         }
1115     }
1116 
1117     if (!(ret & ~0777)) {
1118         ret |= S_IFREG;
1119     }
1120 
1121     if (mode & P9_STAT_MODE_SETUID) {
1122         ret |= S_ISUID;
1123     }
1124     if (mode & P9_STAT_MODE_SETGID) {
1125         ret |= S_ISGID;
1126     }
1127     if (mode & P9_STAT_MODE_SETVTX) {
1128         ret |= S_ISVTX;
1129     }
1130 
1131     return ret;
1132 }
1133 
1134 static int donttouch_stat(V9fsStat *stat)
1135 {
1136     if (stat->type == -1 &&
1137         stat->dev == -1 &&
1138         stat->qid.type == 0xff &&
1139         stat->qid.version == (uint32_t) -1 &&
1140         stat->qid.path == (uint64_t) -1 &&
1141         stat->mode == -1 &&
1142         stat->atime == -1 &&
1143         stat->mtime == -1 &&
1144         stat->length == -1 &&
1145         !stat->name.size &&
1146         !stat->uid.size &&
1147         !stat->gid.size &&
1148         !stat->muid.size &&
1149         stat->n_uid == -1 &&
1150         stat->n_gid == -1 &&
1151         stat->n_muid == -1) {
1152         return 1;
1153     }
1154 
1155     return 0;
1156 }
1157 
1158 static void v9fs_stat_init(V9fsStat *stat)
1159 {
1160     v9fs_string_init(&stat->name);
1161     v9fs_string_init(&stat->uid);
1162     v9fs_string_init(&stat->gid);
1163     v9fs_string_init(&stat->muid);
1164     v9fs_string_init(&stat->extension);
1165 }
1166 
1167 static void v9fs_stat_free(V9fsStat *stat)
1168 {
1169     v9fs_string_free(&stat->name);
1170     v9fs_string_free(&stat->uid);
1171     v9fs_string_free(&stat->gid);
1172     v9fs_string_free(&stat->muid);
1173     v9fs_string_free(&stat->extension);
1174 }
1175 
1176 static uint32_t stat_to_v9mode(const struct stat *stbuf)
1177 {
1178     uint32_t mode;
1179 
1180     mode = stbuf->st_mode & 0777;
1181     if (S_ISDIR(stbuf->st_mode)) {
1182         mode |= P9_STAT_MODE_DIR;
1183     }
1184 
1185     if (S_ISLNK(stbuf->st_mode)) {
1186         mode |= P9_STAT_MODE_SYMLINK;
1187     }
1188 
1189     if (S_ISSOCK(stbuf->st_mode)) {
1190         mode |= P9_STAT_MODE_SOCKET;
1191     }
1192 
1193     if (S_ISFIFO(stbuf->st_mode)) {
1194         mode |= P9_STAT_MODE_NAMED_PIPE;
1195     }
1196 
1197     if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
1198         mode |= P9_STAT_MODE_DEVICE;
1199     }
1200 
1201     if (stbuf->st_mode & S_ISUID) {
1202         mode |= P9_STAT_MODE_SETUID;
1203     }
1204 
1205     if (stbuf->st_mode & S_ISGID) {
1206         mode |= P9_STAT_MODE_SETGID;
1207     }
1208 
1209     if (stbuf->st_mode & S_ISVTX) {
1210         mode |= P9_STAT_MODE_SETVTX;
1211     }
1212 
1213     return mode;
1214 }
1215 
1216 static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path,
1217                                        const char *basename,
1218                                        const struct stat *stbuf,
1219                                        V9fsStat *v9stat)
1220 {
1221     int err;
1222 
1223     memset(v9stat, 0, sizeof(*v9stat));
1224 
1225     err = stat_to_qid(pdu, stbuf, &v9stat->qid);
1226     if (err < 0) {
1227         return err;
1228     }
1229     v9stat->mode = stat_to_v9mode(stbuf);
1230     v9stat->atime = stbuf->st_atime;
1231     v9stat->mtime = stbuf->st_mtime;
1232     v9stat->length = stbuf->st_size;
1233 
1234     v9fs_string_free(&v9stat->uid);
1235     v9fs_string_free(&v9stat->gid);
1236     v9fs_string_free(&v9stat->muid);
1237 
1238     v9stat->n_uid = stbuf->st_uid;
1239     v9stat->n_gid = stbuf->st_gid;
1240     v9stat->n_muid = 0;
1241 
1242     v9fs_string_free(&v9stat->extension);
1243 
1244     if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
1245         err = v9fs_co_readlink(pdu, path, &v9stat->extension);
1246         if (err < 0) {
1247             return err;
1248         }
1249     } else if (v9stat->mode & P9_STAT_MODE_DEVICE) {
1250         v9fs_string_sprintf(&v9stat->extension, "%c %u %u",
1251                 S_ISCHR(stbuf->st_mode) ? 'c' : 'b',
1252                 major(stbuf->st_rdev), minor(stbuf->st_rdev));
1253     } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) {
1254         v9fs_string_sprintf(&v9stat->extension, "%s %lu",
1255                 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink);
1256     }
1257 
1258     v9fs_string_sprintf(&v9stat->name, "%s", basename);
1259 
1260     v9stat->size = 61 +
1261         v9fs_string_size(&v9stat->name) +
1262         v9fs_string_size(&v9stat->uid) +
1263         v9fs_string_size(&v9stat->gid) +
1264         v9fs_string_size(&v9stat->muid) +
1265         v9fs_string_size(&v9stat->extension);
1266     return 0;
1267 }
1268 
1269 #define P9_STATS_MODE          0x00000001ULL
1270 #define P9_STATS_NLINK         0x00000002ULL
1271 #define P9_STATS_UID           0x00000004ULL
1272 #define P9_STATS_GID           0x00000008ULL
1273 #define P9_STATS_RDEV          0x00000010ULL
1274 #define P9_STATS_ATIME         0x00000020ULL
1275 #define P9_STATS_MTIME         0x00000040ULL
1276 #define P9_STATS_CTIME         0x00000080ULL
1277 #define P9_STATS_INO           0x00000100ULL
1278 #define P9_STATS_SIZE          0x00000200ULL
1279 #define P9_STATS_BLOCKS        0x00000400ULL
1280 
1281 #define P9_STATS_BTIME         0x00000800ULL
1282 #define P9_STATS_GEN           0x00001000ULL
1283 #define P9_STATS_DATA_VERSION  0x00002000ULL
1284 
1285 #define P9_STATS_BASIC         0x000007ffULL /* Mask for fields up to BLOCKS */
1286 #define P9_STATS_ALL           0x00003fffULL /* Mask for All fields above */
1287 
1288 
1289 /**
1290  * blksize_to_iounit() - Block size exposed to 9p client.
1291  * @pdu: 9p client request
1292  * @blksize: host filesystem's block size
1293  *
1294  * Convert the host filesystem's block size into an appropriate block size
1295  * for the 9p client (guest OS side). The value returned suggests an
1296  * "optimum" block size for 9p I/O, i.e. to maximize performance.
1297  *
1298  * Return: block size (iounit) to report to the 9p client
1299  */
1300 static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize)
1301 {
1302     int32_t iounit = 0;
1303     V9fsState *s = pdu->s;
1304 
1305     /*
1306      * iounit should be a multiple of blksize (the host filesystem block size)
1307      * and no larger than (client msize - P9_IOHDRSZ)
1308      */
1309     if (blksize) {
1310         iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize);
1311     }
1312     if (!iounit) {
1313         iounit = s->msize - P9_IOHDRSZ;
1314     }
1315     return iounit;
1316 }
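/*
 * For example, with the Linux client default msize of 8192 (see
 * v9fs_version() below) and a host filesystem block size of 4096, and
 * assuming P9_IOHDRSZ is 24 bytes, this yields
 * QEMU_ALIGN_DOWN(8192 - 24, 4096) == 4096.
 */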
1317 
1318 static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf)
1319 {
1320     return blksize_to_iounit(pdu, stbuf->st_blksize);
1321 }
1322 
1323 static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf,
1324                                 V9fsStatDotl *v9lstat)
1325 {
1326     memset(v9lstat, 0, sizeof(*v9lstat));
1327 
1328     v9lstat->st_mode = stbuf->st_mode;
1329     v9lstat->st_nlink = stbuf->st_nlink;
1330     v9lstat->st_uid = stbuf->st_uid;
1331     v9lstat->st_gid = stbuf->st_gid;
1332     v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev);
1333     v9lstat->st_size = stbuf->st_size;
1334     v9lstat->st_blksize = stat_to_iounit(pdu, stbuf);
1335     v9lstat->st_blocks = stbuf->st_blocks;
1336     v9lstat->st_atime_sec = stbuf->st_atime;
1337     v9lstat->st_mtime_sec = stbuf->st_mtime;
1338     v9lstat->st_ctime_sec = stbuf->st_ctime;
1339 #ifdef CONFIG_DARWIN
1340     v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec;
1341     v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec;
1342     v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec;
1343 #else
1344     v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
1345     v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
1346     v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
1347 #endif
1348     /* Currently we only support BASIC fields in stat */
1349     v9lstat->st_result_mask = P9_STATS_BASIC;
1350 
1351     return stat_to_qid(pdu, stbuf, &v9lstat->qid);
1352 }
1353 
1354 static void print_sg(struct iovec *sg, int cnt)
1355 {
1356     int i;
1357 
1358     printf("sg[%d]: {", cnt);
1359     for (i = 0; i < cnt; i++) {
1360         if (i) {
1361             printf(", ");
1362         }
1363         printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
1364     }
1365     printf("}\n");
1366 }
1367 
1368 /* Will call this only for path name based fid */
1369 static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
1370 {
1371     V9fsPath str;
1372     v9fs_path_init(&str);
1373     v9fs_path_copy(&str, dst);
1374     v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
1375     v9fs_path_free(&str);
1376 }
1377 
1378 static inline bool is_ro_export(FsContext *ctx)
1379 {
1380     return ctx->export_flags & V9FS_RDONLY;
1381 }
1382 
1383 static void coroutine_fn v9fs_version(void *opaque)
1384 {
1385     ssize_t err;
1386     V9fsPDU *pdu = opaque;
1387     V9fsState *s = pdu->s;
1388     V9fsString version;
1389     size_t offset = 7;
1390 
1391     v9fs_string_init(&version);
1392     err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
1393     if (err < 0) {
1394         goto out;
1395     }
1396     trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
1397 
1398     virtfs_reset(pdu);
1399 
1400     if (!strcmp(version.data, "9P2000.u")) {
1401         s->proto_version = V9FS_PROTO_2000U;
1402     } else if (!strcmp(version.data, "9P2000.L")) {
1403         s->proto_version = V9FS_PROTO_2000L;
1404     } else {
1405         v9fs_string_sprintf(&version, "unknown");
1406         /* skip min. msize check, reporting invalid version has priority */
1407         goto marshal;
1408     }
1409 
1410     if (s->msize < P9_MIN_MSIZE) {
1411         err = -EMSGSIZE;
1412         error_report(
1413             "9pfs: Client requested msize < minimum msize ("
1414             stringify(P9_MIN_MSIZE) ") supported by this server."
1415         );
1416         goto out;
1417     }
1418 
1419     /* 8192 is the default msize of Linux clients */
1420     if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) {
1421         warn_report_once(
1422             "9p: degraded performance: a reasonably high msize should be "
1423             "chosen on client/guest side (chosen msize is <= 8192). See "
1424             "https://wiki.qemu.org/Documentation/9psetup#msize for details."
1425         );
1426     }
1427 
1428 marshal:
1429     err = pdu_marshal(pdu, offset, "ds", s->msize, &version);
1430     if (err < 0) {
1431         goto out;
1432     }
1433     err += offset;
1434     trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data);
1435 out:
1436     pdu_complete(pdu, err);
1437     v9fs_string_free(&version);
1438 }
1439 
1440 static void coroutine_fn v9fs_attach(void *opaque)
1441 {
1442     V9fsPDU *pdu = opaque;
1443     V9fsState *s = pdu->s;
1444     int32_t fid, afid, n_uname;
1445     V9fsString uname, aname;
1446     V9fsFidState *fidp;
1447     size_t offset = 7;
1448     V9fsQID qid;
1449     ssize_t err;
1450     struct stat stbuf;
1451 
1452     v9fs_string_init(&uname);
1453     v9fs_string_init(&aname);
1454     err = pdu_unmarshal(pdu, offset, "ddssd", &fid,
1455                         &afid, &uname, &aname, &n_uname);
1456     if (err < 0) {
1457         goto out_nofid;
1458     }
1459     trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data);
1460 
1461     fidp = alloc_fid(s, fid);
1462     if (fidp == NULL) {
1463         err = -EINVAL;
1464         goto out_nofid;
1465     }
1466     fidp->uid = n_uname;
1467     err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path);
1468     if (err < 0) {
1469         err = -EINVAL;
1470         clunk_fid(s, fid);
1471         goto out;
1472     }
1473     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1474     if (err < 0) {
1475         err = -EINVAL;
1476         clunk_fid(s, fid);
1477         goto out;
1478     }
1479     err = stat_to_qid(pdu, &stbuf, &qid);
1480     if (err < 0) {
1481         err = -EINVAL;
1482         clunk_fid(s, fid);
1483         goto out;
1484     }
1485 
1486     /*
1487      * Disable migration if we haven't done so already.
1488      * Attach could get called multiple times for the same export.
1489      */
1490     if (!s->migration_blocker) {
1491         error_setg(&s->migration_blocker,
1492                    "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
1493                    s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
1494         err = migrate_add_blocker(s->migration_blocker, NULL);
1495         if (err < 0) {
1496             error_free(s->migration_blocker);
1497             s->migration_blocker = NULL;
1498             clunk_fid(s, fid);
1499             goto out;
1500         }
1501         s->root_fid = fid;
1502     }
1503 
1504     err = pdu_marshal(pdu, offset, "Q", &qid);
1505     if (err < 0) {
1506         clunk_fid(s, fid);
1507         goto out;
1508     }
1509     err += offset;
1510 
1511     memcpy(&s->root_st, &stbuf, sizeof(stbuf));
1512     trace_v9fs_attach_return(pdu->tag, pdu->id,
1513                              qid.type, qid.version, qid.path);
1514 out:
1515     put_fid(pdu, fidp);
1516 out_nofid:
1517     pdu_complete(pdu, err);
1518     v9fs_string_free(&uname);
1519     v9fs_string_free(&aname);
1520 }
1521 
1522 static void coroutine_fn v9fs_stat(void *opaque)
1523 {
1524     int32_t fid;
1525     V9fsStat v9stat;
1526     ssize_t err = 0;
1527     size_t offset = 7;
1528     struct stat stbuf;
1529     V9fsFidState *fidp;
1530     V9fsPDU *pdu = opaque;
1531     char *basename;
1532 
1533     err = pdu_unmarshal(pdu, offset, "d", &fid);
1534     if (err < 0) {
1535         goto out_nofid;
1536     }
1537     trace_v9fs_stat(pdu->tag, pdu->id, fid);
1538 
1539     fidp = get_fid(pdu, fid);
1540     if (fidp == NULL) {
1541         err = -ENOENT;
1542         goto out_nofid;
1543     }
1544     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1545     if (err < 0) {
1546         goto out;
1547     }
1548     basename = g_path_get_basename(fidp->path.data);
1549     err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat);
1550     g_free(basename);
1551     if (err < 0) {
1552         goto out;
1553     }
1554     err = pdu_marshal(pdu, offset, "wS", 0, &v9stat);
1555     if (err < 0) {
1556         v9fs_stat_free(&v9stat);
1557         goto out;
1558     }
1559     trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode,
1560                            v9stat.atime, v9stat.mtime, v9stat.length);
1561     err += offset;
1562     v9fs_stat_free(&v9stat);
1563 out:
1564     put_fid(pdu, fidp);
1565 out_nofid:
1566     pdu_complete(pdu, err);
1567 }
1568 
1569 static void coroutine_fn v9fs_getattr(void *opaque)
1570 {
1571     int32_t fid;
1572     size_t offset = 7;
1573     ssize_t retval = 0;
1574     struct stat stbuf;
1575     V9fsFidState *fidp;
1576     uint64_t request_mask;
1577     V9fsStatDotl v9stat_dotl;
1578     V9fsPDU *pdu = opaque;
1579 
1580     retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask);
1581     if (retval < 0) {
1582         goto out_nofid;
1583     }
1584     trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask);
1585 
1586     fidp = get_fid(pdu, fid);
1587     if (fidp == NULL) {
1588         retval = -ENOENT;
1589         goto out_nofid;
1590     }
1591     /*
1592      * Currently we only support BASIC fields in stat, so there is no
1593      * need to look at request_mask.
1594      */
1595     retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1596     if (retval < 0) {
1597         goto out;
1598     }
1599     retval = stat_to_v9stat_dotl(pdu, &stbuf, &v9stat_dotl);
1600     if (retval < 0) {
1601         goto out;
1602     }
1603 
1604     /*  fill st_gen if requested and supported by underlying fs */
1605     if (request_mask & P9_STATS_GEN) {
1606         retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
1607         switch (retval) {
1608         case 0:
1609             /* we have valid st_gen: update result mask */
1610             v9stat_dotl.st_result_mask |= P9_STATS_GEN;
1611             break;
1612         case -EINTR:
1613             /* request cancelled, e.g. by Tflush */
1614             goto out;
1615         default:
1616             /* failed to get st_gen: not fatal, ignore */
1617             break;
1618         }
1619     }
1620     retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl);
1621     if (retval < 0) {
1622         goto out;
1623     }
1624     retval += offset;
1625     trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask,
1626                               v9stat_dotl.st_mode, v9stat_dotl.st_uid,
1627                               v9stat_dotl.st_gid);
1628 out:
1629     put_fid(pdu, fidp);
1630 out_nofid:
1631     pdu_complete(pdu, retval);
1632 }
1633 
1634 /* Attribute flags */
1635 #define P9_ATTR_MODE       (1 << 0)
1636 #define P9_ATTR_UID        (1 << 1)
1637 #define P9_ATTR_GID        (1 << 2)
1638 #define P9_ATTR_SIZE       (1 << 3)
1639 #define P9_ATTR_ATIME      (1 << 4)
1640 #define P9_ATTR_MTIME      (1 << 5)
1641 #define P9_ATTR_CTIME      (1 << 6)
1642 #define P9_ATTR_ATIME_SET  (1 << 7)
1643 #define P9_ATTR_MTIME_SET  (1 << 8)
1644 
1645 #define P9_ATTR_MASK    127
1646 
1647 static void coroutine_fn v9fs_setattr(void *opaque)
1648 {
1649     int err = 0;
1650     int32_t fid;
1651     V9fsFidState *fidp;
1652     size_t offset = 7;
1653     V9fsIattr v9iattr;
1654     V9fsPDU *pdu = opaque;
1655 
1656     err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr);
1657     if (err < 0) {
1658         goto out_nofid;
1659     }
1660 
1661     trace_v9fs_setattr(pdu->tag, pdu->id, fid,
1662                        v9iattr.valid, v9iattr.mode, v9iattr.uid, v9iattr.gid,
1663                        v9iattr.size, v9iattr.atime_sec, v9iattr.mtime_sec);
1664 
1665     fidp = get_fid(pdu, fid);
1666     if (fidp == NULL) {
1667         err = -EINVAL;
1668         goto out_nofid;
1669     }
1670     if (v9iattr.valid & P9_ATTR_MODE) {
1671         err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode);
1672         if (err < 0) {
1673             goto out;
1674         }
1675     }
1676     if (v9iattr.valid & (P9_ATTR_ATIME | P9_ATTR_MTIME)) {
1677         struct timespec times[2];
1678         if (v9iattr.valid & P9_ATTR_ATIME) {
1679             if (v9iattr.valid & P9_ATTR_ATIME_SET) {
1680                 times[0].tv_sec = v9iattr.atime_sec;
1681                 times[0].tv_nsec = v9iattr.atime_nsec;
1682             } else {
1683                 times[0].tv_nsec = UTIME_NOW;
1684             }
1685         } else {
1686             times[0].tv_nsec = UTIME_OMIT;
1687         }
1688         if (v9iattr.valid & P9_ATTR_MTIME) {
1689             if (v9iattr.valid & P9_ATTR_MTIME_SET) {
1690                 times[1].tv_sec = v9iattr.mtime_sec;
1691                 times[1].tv_nsec = v9iattr.mtime_nsec;
1692             } else {
1693                 times[1].tv_nsec = UTIME_NOW;
1694             }
1695         } else {
1696             times[1].tv_nsec = UTIME_OMIT;
1697         }
1698         err = v9fs_co_utimensat(pdu, &fidp->path, times);
1699         if (err < 0) {
1700             goto out;
1701         }
1702     }
1703     /*
1704      * If the only valid entry in iattr is ctime, we can call
1705      * chown(-1, -1) to update the ctime of the file.
1706      */
1707     if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) ||
1708         ((v9iattr.valid & P9_ATTR_CTIME)
1709          && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) {
1710         if (!(v9iattr.valid & P9_ATTR_UID)) {
1711             v9iattr.uid = -1;
1712         }
1713         if (!(v9iattr.valid & P9_ATTR_GID)) {
1714             v9iattr.gid = -1;
1715         }
1716         err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid,
1717                             v9iattr.gid);
1718         if (err < 0) {
1719             goto out;
1720         }
1721     }
1722     if (v9iattr.valid & (P9_ATTR_SIZE)) {
1723         err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
1724         if (err < 0) {
1725             goto out;
1726         }
1727     }
1728     err = offset;
1729     trace_v9fs_setattr_return(pdu->tag, pdu->id);
1730 out:
1731     put_fid(pdu, fidp);
1732 out_nofid:
1733     pdu_complete(pdu, err);
1734 }
1735 
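     /*
      * Marshal the Rwalk payload: nwqid[2] followed by nwqid qid[13] elements.
      */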
1736 static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids)
1737 {
1738     int i;
1739     ssize_t err;
1740     size_t offset = 7;
1741 
1742     err = pdu_marshal(pdu, offset, "w", nwnames);
1743     if (err < 0) {
1744         return err;
1745     }
1746     offset += err;
1747     for (i = 0; i < nwnames; i++) {
1748         err = pdu_marshal(pdu, offset, "Q", &qids[i]);
1749         if (err < 0) {
1750             return err;
1751         }
1752         offset += err;
1753     }
1754     return offset;
1755 }
1756 
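     /* A client-supplied name must be non-empty and must not contain '/' */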
1757 static bool name_is_illegal(const char *name)
1758 {
1759     return !*name || strchr(name, '/') != NULL;
1760 }
1761 
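     /* Two stat results denote the same file if device and inode numbers match */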
1762 static bool same_stat_id(const struct stat *a, const struct stat *b)
1763 {
1764     return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
1765 }
1766 
1767 static void coroutine_fn v9fs_walk(void *opaque)
1768 {
1769     int name_idx, nwalked;
1770     g_autofree V9fsQID *qids = NULL;
1771     int i, err = 0, any_err = 0;
1772     V9fsPath dpath, path;
1773     P9ARRAY_REF(V9fsPath) pathes = NULL;
1774     uint16_t nwnames;
1775     struct stat stbuf, fidst;
1776     g_autofree struct stat *stbufs = NULL;
1777     size_t offset = 7;
1778     int32_t fid, newfid;
1779     P9ARRAY_REF(V9fsString) wnames = NULL;
1780     V9fsFidState *fidp;
1781     V9fsFidState *newfidp = NULL;
1782     V9fsPDU *pdu = opaque;
1783     V9fsState *s = pdu->s;
1784     V9fsQID qid;
1785 
1786     err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames);
1787     if (err < 0) {
1788         pdu_complete(pdu, err);
1789         return;
1790     }
1791     offset += err;
1792 
1793     trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
1794 
1795     if (nwnames > P9_MAXWELEM) {
1796         err = -EINVAL;
1797         goto out_nofid;
1798     }
1799     if (nwnames) {
1800         P9ARRAY_NEW(V9fsString, wnames, nwnames);
1801         qids   = g_new0(V9fsQID, nwnames);
1802         stbufs = g_new0(struct stat, nwnames);
1803         P9ARRAY_NEW(V9fsPath, pathes, nwnames);
1804         for (i = 0; i < nwnames; i++) {
1805             err = pdu_unmarshal(pdu, offset, "s", &wnames[i]);
1806             if (err < 0) {
1807                 goto out_nofid;
1808             }
1809             if (name_is_illegal(wnames[i].data)) {
1810                 err = -ENOENT;
1811                 goto out_nofid;
1812             }
1813             offset += err;
1814         }
1815     }
1816     fidp = get_fid(pdu, fid);
1817     if (fidp == NULL) {
1818         err = -ENOENT;
1819         goto out_nofid;
1820     }
1821 
1822     v9fs_path_init(&dpath);
1823     v9fs_path_init(&path);
1824     /*
1825      * Both dpath and path initially refer to fidp's path.
1826      * This is needed to handle a request with nwnames == 0.
1827      */
1828     v9fs_path_copy(&dpath, &fidp->path);
1829     v9fs_path_copy(&path, &fidp->path);
1830 
1831     /*
1832      * To keep latency (i.e. overall execution time for processing this
1833      * Twalk client request) as small as possible, run all the required fs
1834      * driver code altogether inside the following block.
1835      */
1836     v9fs_co_run_in_worker({
1837         nwalked = 0;
1838         if (v9fs_request_cancelled(pdu)) {
1839             any_err |= err = -EINTR;
1840             break;
1841         }
1842         err = s->ops->lstat(&s->ctx, &dpath, &fidst);
1843         if (err < 0) {
1844             any_err |= err = -errno;
1845             break;
1846         }
1847         stbuf = fidst;
1848         for (; nwalked < nwnames; nwalked++) {
1849             if (v9fs_request_cancelled(pdu)) {
1850                 any_err |= err = -EINTR;
1851                 break;
1852             }
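                 /*
                  * Walking ".." at the export root must not escape the export,
                  * so in that case the current path is kept unchanged.
                  */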
1853             if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1854                 strcmp("..", wnames[nwalked].data))
1855             {
1856                 err = s->ops->name_to_path(&s->ctx, &dpath,
1857                                            wnames[nwalked].data,
1858                                            &pathes[nwalked]);
1859                 if (err < 0) {
1860                     any_err |= err = -errno;
1861                     break;
1862                 }
1863                 if (v9fs_request_cancelled(pdu)) {
1864                     any_err |= err = -EINTR;
1865                     break;
1866                 }
1867                 err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf);
1868                 if (err < 0) {
1869                     any_err |= err = -errno;
1870                     break;
1871                 }
1872                 stbufs[nwalked] = stbuf;
1873                 v9fs_path_copy(&dpath, &pathes[nwalked]);
1874             }
1875         }
1876     });
1877     /*
1878      * Handle all the rest of this Twalk request on main thread ...
1879      *
1880      * NOTE: -EINTR is an exception where we deviate from the protocol spec
1881      * and simply send an Rlerror response instead of bothering to assemble
1882      * a (truncated) Rwalk response, because -EINTR is always the result of a
1883      * Tflush request, so the client would no longer wait for a response in
1884      * this case anyway.
1885      */
1886     if ((err < 0 && !nwalked) || err == -EINTR) {
1887         goto out;
1888     }
1889 
1890     any_err |= err = stat_to_qid(pdu, &fidst, &qid);
1891     if (err < 0 && !nwalked) {
1892         goto out;
1893     }
1894     stbuf = fidst;
1895 
1896     /* reset dpath and path */
1897     v9fs_path_copy(&dpath, &fidp->path);
1898     v9fs_path_copy(&path, &fidp->path);
1899 
1900     for (name_idx = 0; name_idx < nwalked; name_idx++) {
1901         if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1902             strcmp("..", wnames[name_idx].data))
1903         {
1904             stbuf = stbufs[name_idx];
1905             any_err |= err = stat_to_qid(pdu, &stbuf, &qid);
1906             if (err < 0) {
1907                 break;
1908             }
1909             v9fs_path_copy(&path, &pathes[name_idx]);
1910             v9fs_path_copy(&dpath, &path);
1911         }
1912         memcpy(&qids[name_idx], &qid, sizeof(qid));
1913     }
1914     if (any_err < 0) {
1915         if (!name_idx) {
1916             /* don't send any QIDs, send Rlerror instead */
1917             goto out;
1918         } else {
1919             /* send QIDs (not Rlerror), but fid MUST remain unaffected */
1920             goto send_qids;
1921         }
1922     }
1923     if (fid == newfid) {
1924         if (fidp->fid_type != P9_FID_NONE) {
1925             err = -EINVAL;
1926             goto out;
1927         }
1928         v9fs_path_write_lock(s);
1929         v9fs_path_copy(&fidp->path, &path);
1930         v9fs_path_unlock(s);
1931     } else {
1932         newfidp = alloc_fid(s, newfid);
1933         if (newfidp == NULL) {
1934             err = -EINVAL;
1935             goto out;
1936         }
1937         newfidp->uid = fidp->uid;
1938         v9fs_path_copy(&newfidp->path, &path);
1939     }
1940 send_qids:
1941     err = v9fs_walk_marshal(pdu, name_idx, qids);
1942     trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids);
1943 out:
1944     put_fid(pdu, fidp);
1945     if (newfidp) {
1946         put_fid(pdu, newfidp);
1947     }
1948     v9fs_path_free(&dpath);
1949     v9fs_path_free(&path);
1950 out_nofid:
1951     pdu_complete(pdu, err);
1952 }
1953 
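     /*
      * iounit tells the client the preferred maximum payload per I/O request;
      * it is derived from the host file system block size.
      */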
1954 static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
1955 {
1956     struct statfs stbuf;
1957     int err = v9fs_co_statfs(pdu, path, &stbuf);
1958 
1959     return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0);
1960 }
1961 
1962 static void coroutine_fn v9fs_open(void *opaque)
1963 {
1964     int flags;
1965     int32_t fid;
1966     int32_t mode;
1967     V9fsQID qid;
1968     int iounit = 0;
1969     ssize_t err = 0;
1970     size_t offset = 7;
1971     struct stat stbuf;
1972     V9fsFidState *fidp;
1973     V9fsPDU *pdu = opaque;
1974     V9fsState *s = pdu->s;
1975 
1976     if (s->proto_version == V9FS_PROTO_2000L) {
1977         err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
1978     } else {
1979         uint8_t modebyte;
1980         err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte);
1981         mode = modebyte;
1982     }
1983     if (err < 0) {
1984         goto out_nofid;
1985     }
1986     trace_v9fs_open(pdu->tag, pdu->id, fid, mode);
1987 
1988     fidp = get_fid(pdu, fid);
1989     if (fidp == NULL) {
1990         err = -ENOENT;
1991         goto out_nofid;
1992     }
1993     if (fidp->fid_type != P9_FID_NONE) {
1994         err = -EINVAL;
1995         goto out;
1996     }
1997 
1998     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1999     if (err < 0) {
2000         goto out;
2001     }
2002     err = stat_to_qid(pdu, &stbuf, &qid);
2003     if (err < 0) {
2004         goto out;
2005     }
2006     if (S_ISDIR(stbuf.st_mode)) {
2007         err = v9fs_co_opendir(pdu, fidp);
2008         if (err < 0) {
2009             goto out;
2010         }
2011         fidp->fid_type = P9_FID_DIR;
2012         err = pdu_marshal(pdu, offset, "Qd", &qid, 0);
2013         if (err < 0) {
2014             goto out;
2015         }
2016         err += offset;
2017     } else {
2018         if (s->proto_version == V9FS_PROTO_2000L) {
2019             flags = get_dotl_openflags(s, mode);
2020         } else {
2021             flags = omode_to_uflags(mode);
2022         }
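             /* refuse any kind of write access on a read-only export */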
2023         if (is_ro_export(&s->ctx)) {
2024             if (mode & O_WRONLY || mode & O_RDWR ||
2025                 mode & O_APPEND || mode & O_TRUNC) {
2026                 err = -EROFS;
2027                 goto out;
2028             }
2029         }
2030         err = v9fs_co_open(pdu, fidp, flags);
2031         if (err < 0) {
2032             goto out;
2033         }
2034         fidp->fid_type = P9_FID_FILE;
2035         fidp->open_flags = flags;
2036         if (flags & O_EXCL) {
2037             /*
2038              * We let the host file system do the O_EXCL check;
2039              * we should not reclaim such an fd.
2040              */
2041             fidp->flags |= FID_NON_RECLAIMABLE;
2042         }
2043         iounit = get_iounit(pdu, &fidp->path);
2044         err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2045         if (err < 0) {
2046             goto out;
2047         }
2048         err += offset;
2049     }
2050     trace_v9fs_open_return(pdu->tag, pdu->id,
2051                            qid.type, qid.version, qid.path, iounit);
2052 out:
2053     put_fid(pdu, fidp);
2054 out_nofid:
2055     pdu_complete(pdu, err);
2056 }
2057 
2058 static void coroutine_fn v9fs_lcreate(void *opaque)
2059 {
2060     int32_t dfid, flags, mode;
2061     gid_t gid;
2062     ssize_t err = 0;
2063     ssize_t offset = 7;
2064     V9fsString name;
2065     V9fsFidState *fidp;
2066     struct stat stbuf;
2067     V9fsQID qid;
2068     int32_t iounit;
2069     V9fsPDU *pdu = opaque;
2070 
2071     v9fs_string_init(&name);
2072     err = pdu_unmarshal(pdu, offset, "dsddd", &dfid,
2073                         &name, &flags, &mode, &gid);
2074     if (err < 0) {
2075         goto out_nofid;
2076     }
2077     trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
2078 
2079     if (name_is_illegal(name.data)) {
2080         err = -ENOENT;
2081         goto out_nofid;
2082     }
2083 
2084     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2085         err = -EEXIST;
2086         goto out_nofid;
2087     }
2088 
2089     fidp = get_fid(pdu, dfid);
2090     if (fidp == NULL) {
2091         err = -ENOENT;
2092         goto out_nofid;
2093     }
2094     if (fidp->fid_type != P9_FID_NONE) {
2095         err = -EINVAL;
2096         goto out;
2097     }
2098 
2099     flags = get_dotl_openflags(pdu->s, flags);
2100     err = v9fs_co_open2(pdu, fidp, &name, gid,
2101                         flags | O_CREAT, mode, &stbuf);
2102     if (err < 0) {
2103         goto out;
2104     }
2105     fidp->fid_type = P9_FID_FILE;
2106     fidp->open_flags = flags;
2107     if (flags & O_EXCL) {
2108         /*
2109          * We let the host file system do the O_EXCL check;
2110          * we should not reclaim such an fd.
2111          */
2112         fidp->flags |= FID_NON_RECLAIMABLE;
2113     }
2114     iounit = get_iounit(pdu, &fidp->path);
2115     err = stat_to_qid(pdu, &stbuf, &qid);
2116     if (err < 0) {
2117         goto out;
2118     }
2119     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2120     if (err < 0) {
2121         goto out;
2122     }
2123     err += offset;
2124     trace_v9fs_lcreate_return(pdu->tag, pdu->id,
2125                               qid.type, qid.version, qid.path, iounit);
2126 out:
2127     put_fid(pdu, fidp);
2128 out_nofid:
2129     pdu_complete(pdu, err);
2130     v9fs_string_free(&name);
2131 }
2132 
2133 static void coroutine_fn v9fs_fsync(void *opaque)
2134 {
2135     int err;
2136     int32_t fid;
2137     int datasync;
2138     size_t offset = 7;
2139     V9fsFidState *fidp;
2140     V9fsPDU *pdu = opaque;
2141 
2142     err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
2143     if (err < 0) {
2144         goto out_nofid;
2145     }
2146     trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync);
2147 
2148     fidp = get_fid(pdu, fid);
2149     if (fidp == NULL) {
2150         err = -ENOENT;
2151         goto out_nofid;
2152     }
2153     err = v9fs_co_fsync(pdu, fidp, datasync);
2154     if (!err) {
2155         err = offset;
2156     }
2157     put_fid(pdu, fidp);
2158 out_nofid:
2159     pdu_complete(pdu, err);
2160 }
2161 
2162 static void coroutine_fn v9fs_clunk(void *opaque)
2163 {
2164     int err;
2165     int32_t fid;
2166     size_t offset = 7;
2167     V9fsFidState *fidp;
2168     V9fsPDU *pdu = opaque;
2169     V9fsState *s = pdu->s;
2170 
2171     err = pdu_unmarshal(pdu, offset, "d", &fid);
2172     if (err < 0) {
2173         goto out_nofid;
2174     }
2175     trace_v9fs_clunk(pdu->tag, pdu->id, fid);
2176 
2177     fidp = clunk_fid(s, fid);
2178     if (fidp == NULL) {
2179         err = -ENOENT;
2180         goto out_nofid;
2181     }
2182     /*
2183      * Bump the ref so that put_fid will
2184      * free the fid.
2185      */
2186     fidp->ref++;
2187     err = put_fid(pdu, fidp);
2188     if (!err) {
2189         err = offset;
2190     }
2191 out_nofid:
2192     pdu_complete(pdu, err);
2193 }
2194 
2195 /*
2196  * Create a QEMUIOVector for a sub-region of PDU iovecs
2197  *
2198  * @qiov:       uninitialized QEMUIOVector
2199  * @skip:       number of bytes to skip from beginning of PDU
2200  * @size:       number of bytes to include
2201  * @is_write:   true - write, false - read
2202  *
2203  * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
2204  * with qemu_iovec_destroy().
2205  */
2206 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
2207                                     size_t skip, size_t size,
2208                                     bool is_write)
2209 {
2210     QEMUIOVector elem;
2211     struct iovec *iov;
2212     unsigned int niov;
2213 
2214     if (is_write) {
2215         pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
2216     } else {
2217         pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
2218     }
2219 
2220     qemu_iovec_init_external(&elem, iov, niov);
2221     qemu_iovec_init(qiov, niov);
2222     qemu_iovec_concat(qiov, &elem, skip, size);
2223 }
2224 
2225 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2226                            uint64_t off, uint32_t max_count)
2227 {
2228     ssize_t err;
2229     size_t offset = 7;
2230     uint64_t read_count;
2231     QEMUIOVector qiov_full;
2232 
2233     if (fidp->fs.xattr.len < off) {
2234         read_count = 0;
2235     } else {
2236         read_count = fidp->fs.xattr.len - off;
2237     }
2238     if (read_count > max_count) {
2239         read_count = max_count;
2240     }
2241     err = pdu_marshal(pdu, offset, "d", read_count);
2242     if (err < 0) {
2243         return err;
2244     }
2245     offset += err;
2246 
2247     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
2248     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
2249                     ((char *)fidp->fs.xattr.value) + off,
2250                     read_count);
2251     qemu_iovec_destroy(&qiov_full);
2252     if (err < 0) {
2253         return err;
2254     }
2255     offset += err;
2256     return offset;
2257 }
2258 
2259 static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
2260                                                   V9fsFidState *fidp,
2261                                                   uint32_t max_count)
2262 {
2263     V9fsPath path;
2264     V9fsStat v9stat;
2265     int len, err = 0;
2266     int32_t count = 0;
2267     struct stat stbuf;
2268     off_t saved_dir_pos;
2269     struct dirent *dent;
2270 
2271     /* save the directory position */
2272     saved_dir_pos = v9fs_co_telldir(pdu, fidp);
2273     if (saved_dir_pos < 0) {
2274         return saved_dir_pos;
2275     }
2276 
2277     while (1) {
2278         v9fs_path_init(&path);
2279 
2280         v9fs_readdir_lock(&fidp->fs.dir);
2281 
2282         err = v9fs_co_readdir(pdu, fidp, &dent);
2283         if (err || !dent) {
2284             break;
2285         }
2286         err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
2287         if (err < 0) {
2288             break;
2289         }
2290         err = v9fs_co_lstat(pdu, &path, &stbuf);
2291         if (err < 0) {
2292             break;
2293         }
2294         err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat);
2295         if (err < 0) {
2296             break;
2297         }
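             /* v9stat.size does not include its own 2 byte size prefix, hence + 2 */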
2298         if ((count + v9stat.size + 2) > max_count) {
2299             v9fs_readdir_unlock(&fidp->fs.dir);
2300 
2301             /* Ran out of buffer. Set dir back to old position and return */
2302             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2303             v9fs_stat_free(&v9stat);
2304             v9fs_path_free(&path);
2305             return count;
2306         }
2307 
2308         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2309         len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
2310 
2311         v9fs_readdir_unlock(&fidp->fs.dir);
2312 
2313         if (len < 0) {
2314             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2315             v9fs_stat_free(&v9stat);
2316             v9fs_path_free(&path);
2317             return len;
2318         }
2319         count += len;
2320         v9fs_stat_free(&v9stat);
2321         v9fs_path_free(&path);
2322         saved_dir_pos = qemu_dirent_off(dent);
2323     }
2324 
2325     v9fs_readdir_unlock(&fidp->fs.dir);
2326 
2327     v9fs_path_free(&path);
2328     if (err < 0) {
2329         return err;
2330     }
2331     return count;
2332 }
2333 
2334 static void coroutine_fn v9fs_read(void *opaque)
2335 {
2336     int32_t fid;
2337     uint64_t off;
2338     ssize_t err = 0;
2339     int32_t count = 0;
2340     size_t offset = 7;
2341     uint32_t max_count;
2342     V9fsFidState *fidp;
2343     V9fsPDU *pdu = opaque;
2344     V9fsState *s = pdu->s;
2345 
2346     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
2347     if (err < 0) {
2348         goto out_nofid;
2349     }
2350     trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
2351 
2352     fidp = get_fid(pdu, fid);
2353     if (fidp == NULL) {
2354         err = -EINVAL;
2355         goto out_nofid;
2356     }
2357     if (fidp->fid_type == P9_FID_DIR) {
2358         if (s->proto_version != V9FS_PROTO_2000U) {
2359             warn_report_once(
2360                 "9p: bad client: T_read request on directory only expected "
2361                 "with 9P2000.u protocol version"
2362             );
2363             err = -EOPNOTSUPP;
2364             goto out;
2365         }
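             /* an offset of 0 restarts the directory listing from the beginning */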
2366         if (off == 0) {
2367             v9fs_co_rewinddir(pdu, fidp);
2368         }
2369         count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
2370         if (count < 0) {
2371             err = count;
2372             goto out;
2373         }
2374         err = pdu_marshal(pdu, offset, "d", count);
2375         if (err < 0) {
2376             goto out;
2377         }
2378         err += offset + count;
2379     } else if (fidp->fid_type == P9_FID_FILE) {
2380         QEMUIOVector qiov_full;
2381         QEMUIOVector qiov;
2382         int32_t len;
2383 
2384         v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
2385         qemu_iovec_init(&qiov, qiov_full.niov);
2386         do {
2387             qemu_iovec_reset(&qiov);
2388             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
2389             if (0) {
2390                 print_sg(qiov.iov, qiov.niov);
2391             }
2392             /* Loop in case of EINTR */
2393             do {
2394                 len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
2395                 if (len >= 0) {
2396                     off   += len;
2397                     count += len;
2398                 }
2399             } while (len == -EINTR && !pdu->cancelled);
2400             if (len < 0) {
2401                 /* I/O error: return the error */
2402                 err = len;
2403                 goto out_free_iovec;
2404             }
2405         } while (count < max_count && len > 0);
2406         err = pdu_marshal(pdu, offset, "d", count);
2407         if (err < 0) {
2408             goto out_free_iovec;
2409         }
2410         err += offset + count;
2411 out_free_iovec:
2412         qemu_iovec_destroy(&qiov);
2413         qemu_iovec_destroy(&qiov_full);
2414     } else if (fidp->fid_type == P9_FID_XATTR) {
2415         err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
2416     } else {
2417         err = -EINVAL;
2418     }
2419     trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
2420 out:
2421     put_fid(pdu, fidp);
2422 out_nofid:
2423     pdu_complete(pdu, err);
2424 }
2425 
2426 /**
2427  * v9fs_readdir_response_size() - Returns size required in Rreaddir response
2428  * for the passed dirent @name.
2429  *
2430  * @name: directory entry's name (i.e. file name, directory name)
2431  * Return: required size in bytes
2432  */
2433 size_t v9fs_readdir_response_size(V9fsString *name)
2434 {
2435     /*
2436      * Size of each dirent on the wire: size of qid (13) + size of offset (8)
2437      * + size of type (1) + size of name.size (2) + strlen(name.data)
2438      */
2439     return 24 + v9fs_string_size(name);
2440 }
2441 
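     /* Release a list of entries collected by v9fs_co_readdir_many() */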
2442 static void v9fs_free_dirents(struct V9fsDirEnt *e)
2443 {
2444     struct V9fsDirEnt *next = NULL;
2445 
2446     for (; e; e = next) {
2447         next = e->next;
2448         g_free(e->dent);
2449         g_free(e->st);
2450         g_free(e);
2451     }
2452 }
2453 
2454 static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
2455                                         off_t offset, int32_t max_count)
2456 {
2457     size_t size;
2458     V9fsQID qid;
2459     V9fsString name;
2460     int len, err = 0;
2461     int32_t count = 0;
2462     off_t off;
2463     struct dirent *dent;
2464     struct stat *st;
2465     struct V9fsDirEnt *entries = NULL;
2466 
2467     /*
2468      * inode remapping requires the device id, which in turn might be
2469      * different for different directory entries, so if inode remapping is
2470      * enabled we have to make a full stat for each directory entry
2471      */
2472     const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES;
2473 
2474     /*
2475      * Fetch all required directory entries altogether on a background IO
2476      * thread from the fs driver. We don't want to do that for each entry
2477      * individually, because hopping between threads (this main IO thread
2478      * and background IO driver thread) would sum up to huge latencies.
2479      */
2480     count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
2481                                  dostat);
2482     if (count < 0) {
2483         err = count;
2484         count = 0;
2485         goto out;
2486     }
2487     count = 0;
2488 
2489     for (struct V9fsDirEnt *e = entries; e; e = e->next) {
2490         dent = e->dent;
2491 
2492         if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
2493             st = e->st;
2494             /* e->st should never be NULL, but just to be sure */
2495             if (!st) {
2496                 err = -1;
2497                 break;
2498             }
2499 
2500             /* remap inode */
2501             err = stat_to_qid(pdu, st, &qid);
2502             if (err < 0) {
2503                 break;
2504             }
2505         } else {
2506             /*
2507              * Fill in just the path field of the qid, because the client
2508              * only uses that. Filling the entire qid structure would
2509              * require a stat of each dirent found, which is expensive, so
2510              * we don't call stat_to_qid() here. The only drawback is that
2511              * the multi-device export detection of stat_to_qid() is not
2512              * performed and reported as an error to the user here, but the
2513              * user would get that error anyway when accessing those
2514              * files/dirs by other means.
2515              */
2516             size = MIN(sizeof(dent->d_ino), sizeof(qid.path));
2517             memcpy(&qid.path, &dent->d_ino, size);
2518             /* Fill the other fields with dummy values */
2519             qid.type = 0;
2520             qid.version = 0;
2521         }
2522 
2523         off = qemu_dirent_off(dent);
2524         v9fs_string_init(&name);
2525         v9fs_string_sprintf(&name, "%s", dent->d_name);
2526 
2527         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2528         len = pdu_marshal(pdu, 11 + count, "Qqbs",
2529                           &qid, off,
2530                           dent->d_type, &name);
2531 
2532         v9fs_string_free(&name);
2533 
2534         if (len < 0) {
2535             err = len;
2536             break;
2537         }
2538 
2539         count += len;
2540     }
2541 
2542 out:
2543     v9fs_free_dirents(entries);
2544     if (err < 0) {
2545         return err;
2546     }
2547     return count;
2548 }
2549 
2550 static void coroutine_fn v9fs_readdir(void *opaque)
2551 {
2552     int32_t fid;
2553     V9fsFidState *fidp;
2554     ssize_t retval = 0;
2555     size_t offset = 7;
2556     uint64_t initial_offset;
2557     int32_t count;
2558     uint32_t max_count;
2559     V9fsPDU *pdu = opaque;
2560     V9fsState *s = pdu->s;
2561 
2562     retval = pdu_unmarshal(pdu, offset, "dqd", &fid,
2563                            &initial_offset, &max_count);
2564     if (retval < 0) {
2565         goto out_nofid;
2566     }
2567     trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count);
2568 
2569     /* Reserve space for the Rreaddir header: size[4] type[1] tag[2] count[4] */
2570     if (max_count > s->msize - 11) {
2571         max_count = s->msize - 11;
2572         warn_report_once(
2573             "9p: bad client: T_readdir with count > msize - 11"
2574         );
2575     }
2576 
2577     fidp = get_fid(pdu, fid);
2578     if (fidp == NULL) {
2579         retval = -EINVAL;
2580         goto out_nofid;
2581     }
2582     if (!fidp->fs.dir.stream) {
2583         retval = -EINVAL;
2584         goto out;
2585     }
2586     if (s->proto_version != V9FS_PROTO_2000L) {
2587         warn_report_once(
2588             "9p: bad client: T_readdir request only expected with 9P2000.L "
2589             "protocol version"
2590         );
2591         retval = -EOPNOTSUPP;
2592         goto out;
2593     }
2594     count = v9fs_do_readdir(pdu, fidp, (off_t) initial_offset, max_count);
2595     if (count < 0) {
2596         retval = count;
2597         goto out;
2598     }
2599     retval = pdu_marshal(pdu, offset, "d", count);
2600     if (retval < 0) {
2601         goto out;
2602     }
2603     retval += count + offset;
2604     trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval);
2605 out:
2606     put_fid(pdu, fidp);
2607 out_nofid:
2608     pdu_complete(pdu, retval);
2609 }
2610 
2611 static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2612                             uint64_t off, uint32_t count,
2613                             struct iovec *sg, int cnt)
2614 {
2615     int i, to_copy;
2616     ssize_t err = 0;
2617     uint64_t write_count;
2618     size_t offset = 7;
2619 
2620 
2621     if (fidp->fs.xattr.len < off) {
2622         return -ENOSPC;
2623     }
2624     write_count = fidp->fs.xattr.len - off;
2625     if (write_count > count) {
2626         write_count = count;
2627     }
2628     err = pdu_marshal(pdu, offset, "d", write_count);
2629     if (err < 0) {
2630         return err;
2631     }
2632     err += offset;
2633     fidp->fs.xattr.copied_len += write_count;
2634     /*
2635      * Now copy the content from the sg list
2636      */
2637     for (i = 0; i < cnt; i++) {
2638         if (write_count > sg[i].iov_len) {
2639             to_copy = sg[i].iov_len;
2640         } else {
2641             to_copy = write_count;
2642         }
2643         memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy);
2644         /* advance the destination offset for the next iovec */
2645         off += to_copy;
2646         write_count -= to_copy;
2647     }
2648 
2649     return err;
2650 }
2651 
2652 static void coroutine_fn v9fs_write(void *opaque)
2653 {
2654     ssize_t err;
2655     int32_t fid;
2656     uint64_t off;
2657     uint32_t count;
2658     int32_t len = 0;
2659     int32_t total = 0;
2660     size_t offset = 7;
2661     V9fsFidState *fidp;
2662     V9fsPDU *pdu = opaque;
2663     V9fsState *s = pdu->s;
2664     QEMUIOVector qiov_full;
2665     QEMUIOVector qiov;
2666 
2667     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count);
2668     if (err < 0) {
2669         pdu_complete(pdu, err);
2670         return;
2671     }
2672     offset += err;
2673     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
2674     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
2675 
2676     fidp = get_fid(pdu, fid);
2677     if (fidp == NULL) {
2678         err = -EINVAL;
2679         goto out_nofid;
2680     }
2681     if (fidp->fid_type == P9_FID_FILE) {
2682         if (fidp->fs.fd == -1) {
2683             err = -EINVAL;
2684             goto out;
2685         }
2686     } else if (fidp->fid_type == P9_FID_XATTR) {
2687         /*
2688          * setxattr operation
2689          */
2690         err = v9fs_xattr_write(s, pdu, fidp, off, count,
2691                                qiov_full.iov, qiov_full.niov);
2692         goto out;
2693     } else {
2694         err = -EINVAL;
2695         goto out;
2696     }
2697     qemu_iovec_init(&qiov, qiov_full.niov);
2698     do {
2699         qemu_iovec_reset(&qiov);
2700         qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
2701         if (0) {
2702             print_sg(qiov.iov, qiov.niov);
2703         }
2704         /* Loop in case of EINTR */
2705         do {
2706             len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off);
2707             if (len >= 0) {
2708                 off   += len;
2709                 total += len;
2710             }
2711         } while (len == -EINTR && !pdu->cancelled);
2712         if (len < 0) {
2713             /* I/O error: return the error */
2714             err = len;
2715             goto out_qiov;
2716         }
2717     } while (total < count && len > 0);
2718 
2719     offset = 7;
2720     err = pdu_marshal(pdu, offset, "d", total);
2721     if (err < 0) {
2722         goto out_qiov;
2723     }
2724     err += offset;
2725     trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
2726 out_qiov:
2727     qemu_iovec_destroy(&qiov);
2728 out:
2729     put_fid(pdu, fidp);
2730 out_nofid:
2731     qemu_iovec_destroy(&qiov_full);
2732     pdu_complete(pdu, err);
2733 }
2734 
2735 static void coroutine_fn v9fs_create(void *opaque)
2736 {
2737     int32_t fid;
2738     int err = 0;
2739     size_t offset = 7;
2740     V9fsFidState *fidp;
2741     V9fsQID qid;
2742     int32_t perm;
2743     int8_t mode;
2744     V9fsPath path;
2745     struct stat stbuf;
2746     V9fsString name;
2747     V9fsString extension;
2748     int iounit;
2749     V9fsPDU *pdu = opaque;
2750     V9fsState *s = pdu->s;
2751 
2752     v9fs_path_init(&path);
2753     v9fs_string_init(&name);
2754     v9fs_string_init(&extension);
2755     err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name,
2756                         &perm, &mode, &extension);
2757     if (err < 0) {
2758         goto out_nofid;
2759     }
2760     trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode);
2761 
2762     if (name_is_illegal(name.data)) {
2763         err = -ENOENT;
2764         goto out_nofid;
2765     }
2766 
2767     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2768         err = -EEXIST;
2769         goto out_nofid;
2770     }
2771 
2772     fidp = get_fid(pdu, fid);
2773     if (fidp == NULL) {
2774         err = -EINVAL;
2775         goto out_nofid;
2776     }
2777     if (fidp->fid_type != P9_FID_NONE) {
2778         err = -EINVAL;
2779         goto out;
2780     }
2781     if (perm & P9_STAT_MODE_DIR) {
2782         err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777,
2783                             fidp->uid, -1, &stbuf);
2784         if (err < 0) {
2785             goto out;
2786         }
2787         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2788         if (err < 0) {
2789             goto out;
2790         }
2791         v9fs_path_write_lock(s);
2792         v9fs_path_copy(&fidp->path, &path);
2793         v9fs_path_unlock(s);
2794         err = v9fs_co_opendir(pdu, fidp);
2795         if (err < 0) {
2796             goto out;
2797         }
2798         fidp->fid_type = P9_FID_DIR;
2799     } else if (perm & P9_STAT_MODE_SYMLINK) {
2800         err = v9fs_co_symlink(pdu, fidp, &name,
2801                               extension.data, -1, &stbuf);
2802         if (err < 0) {
2803             goto out;
2804         }
2805         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2806         if (err < 0) {
2807             goto out;
2808         }
2809         v9fs_path_write_lock(s);
2810         v9fs_path_copy(&fidp->path, &path);
2811         v9fs_path_unlock(s);
2812     } else if (perm & P9_STAT_MODE_LINK) {
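             /* 9P2000.u: extension carries the target fid as a decimal string */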
2813         int32_t ofid = atoi(extension.data);
2814         V9fsFidState *ofidp = get_fid(pdu, ofid);
2815         if (ofidp == NULL) {
2816             err = -EINVAL;
2817             goto out;
2818         }
2819         err = v9fs_co_link(pdu, ofidp, fidp, &name);
2820         put_fid(pdu, ofidp);
2821         if (err < 0) {
2822             goto out;
2823         }
2824         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2825         if (err < 0) {
2826             fidp->fid_type = P9_FID_NONE;
2827             goto out;
2828         }
2829         v9fs_path_write_lock(s);
2830         v9fs_path_copy(&fidp->path, &path);
2831         v9fs_path_unlock(s);
2832         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2833         if (err < 0) {
2834             fidp->fid_type = P9_FID_NONE;
2835             goto out;
2836         }
2837     } else if (perm & P9_STAT_MODE_DEVICE) {
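             /* 9P2000.u: extension is "<b|c> <major> <minor>" for device nodes */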
2838         char ctype;
2839         uint32_t major, minor;
2840         mode_t nmode = 0;
2841 
2842         if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) {
2843             err = -errno;
2844             goto out;
2845         }
2846 
2847         switch (ctype) {
2848         case 'c':
2849             nmode = S_IFCHR;
2850             break;
2851         case 'b':
2852             nmode = S_IFBLK;
2853             break;
2854         default:
2855             err = -EIO;
2856             goto out;
2857         }
2858 
2859         nmode |= perm & 0777;
2860         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2861                             makedev(major, minor), nmode, &stbuf);
2862         if (err < 0) {
2863             goto out;
2864         }
2865         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2866         if (err < 0) {
2867             goto out;
2868         }
2869         v9fs_path_write_lock(s);
2870         v9fs_path_copy(&fidp->path, &path);
2871         v9fs_path_unlock(s);
2872     } else if (perm & P9_STAT_MODE_NAMED_PIPE) {
2873         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2874                             0, S_IFIFO | (perm & 0777), &stbuf);
2875         if (err < 0) {
2876             goto out;
2877         }
2878         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2879         if (err < 0) {
2880             goto out;
2881         }
2882         v9fs_path_write_lock(s);
2883         v9fs_path_copy(&fidp->path, &path);
2884         v9fs_path_unlock(s);
2885     } else if (perm & P9_STAT_MODE_SOCKET) {
2886         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2887                             0, S_IFSOCK | (perm & 0777), &stbuf);
2888         if (err < 0) {
2889             goto out;
2890         }
2891         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2892         if (err < 0) {
2893             goto out;
2894         }
2895         v9fs_path_write_lock(s);
2896         v9fs_path_copy(&fidp->path, &path);
2897         v9fs_path_unlock(s);
2898     } else {
2899         err = v9fs_co_open2(pdu, fidp, &name, -1,
2900                             omode_to_uflags(mode) | O_CREAT, perm, &stbuf);
2901         if (err < 0) {
2902             goto out;
2903         }
2904         fidp->fid_type = P9_FID_FILE;
2905         fidp->open_flags = omode_to_uflags(mode);
2906         if (fidp->open_flags & O_EXCL) {
2907             /*
2908              * We let the host file system do the O_EXCL check;
2909              * we should not reclaim such an fd.
2910              */
2911             fidp->flags |= FID_NON_RECLAIMABLE;
2912         }
2913     }
2914     iounit = get_iounit(pdu, &fidp->path);
2915     err = stat_to_qid(pdu, &stbuf, &qid);
2916     if (err < 0) {
2917         goto out;
2918     }
2919     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2920     if (err < 0) {
2921         goto out;
2922     }
2923     err += offset;
2924     trace_v9fs_create_return(pdu->tag, pdu->id,
2925                              qid.type, qid.version, qid.path, iounit);
2926 out:
2927     put_fid(pdu, fidp);
2928 out_nofid:
2929     pdu_complete(pdu, err);
2930     v9fs_string_free(&name);
2931     v9fs_string_free(&extension);
2932     v9fs_path_free(&path);
2933 }
2934 
2935 static void coroutine_fn v9fs_symlink(void *opaque)
2936 {
2937     V9fsPDU *pdu = opaque;
2938     V9fsString name;
2939     V9fsString symname;
2940     V9fsFidState *dfidp;
2941     V9fsQID qid;
2942     struct stat stbuf;
2943     int32_t dfid;
2944     int err = 0;
2945     gid_t gid;
2946     size_t offset = 7;
2947 
2948     v9fs_string_init(&name);
2949     v9fs_string_init(&symname);
2950     err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid);
2951     if (err < 0) {
2952         goto out_nofid;
2953     }
2954     trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid);
2955 
2956     if (name_is_illegal(name.data)) {
2957         err = -ENOENT;
2958         goto out_nofid;
2959     }
2960 
2961     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2962         err = -EEXIST;
2963         goto out_nofid;
2964     }
2965 
2966     dfidp = get_fid(pdu, dfid);
2967     if (dfidp == NULL) {
2968         err = -EINVAL;
2969         goto out_nofid;
2970     }
2971     err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf);
2972     if (err < 0) {
2973         goto out;
2974     }
2975     err = stat_to_qid(pdu, &stbuf, &qid);
2976     if (err < 0) {
2977         goto out;
2978     }
2979     err = pdu_marshal(pdu, offset, "Q", &qid);
2980     if (err < 0) {
2981         goto out;
2982     }
2983     err += offset;
2984     trace_v9fs_symlink_return(pdu->tag, pdu->id,
2985                               qid.type, qid.version, qid.path);
2986 out:
2987     put_fid(pdu, dfidp);
2988 out_nofid:
2989     pdu_complete(pdu, err);
2990     v9fs_string_free(&name);
2991     v9fs_string_free(&symname);
2992 }
2993 
2994 static void coroutine_fn v9fs_flush(void *opaque)
2995 {
2996     ssize_t err;
2997     int16_t tag;
2998     size_t offset = 7;
2999     V9fsPDU *cancel_pdu = NULL;
3000     V9fsPDU *pdu = opaque;
3001     V9fsState *s = pdu->s;
3002 
3003     err = pdu_unmarshal(pdu, offset, "w", &tag);
3004     if (err < 0) {
3005         pdu_complete(pdu, err);
3006         return;
3007     }
3008     trace_v9fs_flush(pdu->tag, pdu->id, tag);
3009 
3010     if (pdu->tag == tag) {
3011         warn_report("the guest sent a self-referencing 9P flush request");
3012     } else {
3013         QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
3014             if (cancel_pdu->tag == tag) {
3015                 break;
3016             }
3017         }
3018     }
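         /*
          * If a matching in-flight request was found, flag it as cancelled so
          * that its coroutine can abort early, then wait for it to complete.
          */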
3019     if (cancel_pdu) {
3020         cancel_pdu->cancelled = 1;
3021         /*
3022          * Wait for pdu to complete.
3023          */
3024         qemu_co_queue_wait(&cancel_pdu->complete, NULL);
3025         if (!qemu_co_queue_next(&cancel_pdu->complete)) {
3026             cancel_pdu->cancelled = 0;
3027             pdu_free(cancel_pdu);
3028         }
3029     }
3030     pdu_complete(pdu, 7);
3031 }
3032 
3033 static void coroutine_fn v9fs_link(void *opaque)
3034 {
3035     V9fsPDU *pdu = opaque;
3036     int32_t dfid, oldfid;
3037     V9fsFidState *dfidp, *oldfidp;
3038     V9fsString name;
3039     size_t offset = 7;
3040     int err = 0;
3041 
3042     v9fs_string_init(&name);
3043     err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
3044     if (err < 0) {
3045         goto out_nofid;
3046     }
3047     trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
3048 
3049     if (name_is_illegal(name.data)) {
3050         err = -ENOENT;
3051         goto out_nofid;
3052     }
3053 
3054     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3055         err = -EEXIST;
3056         goto out_nofid;
3057     }
3058 
3059     dfidp = get_fid(pdu, dfid);
3060     if (dfidp == NULL) {
3061         err = -ENOENT;
3062         goto out_nofid;
3063     }
3064 
3065     oldfidp = get_fid(pdu, oldfid);
3066     if (oldfidp == NULL) {
3067         err = -ENOENT;
3068         goto out;
3069     }
3070     err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
3071     if (!err) {
3072         err = offset;
3073     }
3074     put_fid(pdu, oldfidp);
3075 out:
3076     put_fid(pdu, dfidp);
3077 out_nofid:
3078     v9fs_string_free(&name);
3079     pdu_complete(pdu, err);
3080 }
3081 
3082 /* Only works with path name based fid */
3083 static void coroutine_fn v9fs_remove(void *opaque)
3084 {
3085     int32_t fid;
3086     int err = 0;
3087     size_t offset = 7;
3088     V9fsFidState *fidp;
3089     V9fsPDU *pdu = opaque;
3090 
3091     err = pdu_unmarshal(pdu, offset, "d", &fid);
3092     if (err < 0) {
3093         goto out_nofid;
3094     }
3095     trace_v9fs_remove(pdu->tag, pdu->id, fid);
3096 
3097     fidp = get_fid(pdu, fid);
3098     if (fidp == NULL) {
3099         err = -EINVAL;
3100         goto out_nofid;
3101     }
3102     /* if fs driver is not path based, return EOPNOTSUPP */
3103     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3104         err = -EOPNOTSUPP;
3105         goto out_err;
3106     }
3107     /*
3108      * If the file is unlinked, we cannot reopen
3109      * it later, so don't reclaim the fd.
3110      */
3111     err = v9fs_mark_fids_unreclaim(pdu, &fidp->path);
3112     if (err < 0) {
3113         goto out_err;
3114     }
3115     err = v9fs_co_remove(pdu, &fidp->path);
3116     if (!err) {
3117         err = offset;
3118     }
3119 out_err:
3120     /* For TREMOVE we need to clunk the fid even on failed remove */
3121     clunk_fid(pdu->s, fidp->fid);
3122     put_fid(pdu, fidp);
3123 out_nofid:
3124     pdu_complete(pdu, err);
3125 }
3126 
3127 static void coroutine_fn v9fs_unlinkat(void *opaque)
3128 {
3129     int err = 0;
3130     V9fsString name;
3131     int32_t dfid, flags, rflags = 0;
3132     size_t offset = 7;
3133     V9fsPath path;
3134     V9fsFidState *dfidp;
3135     V9fsPDU *pdu = opaque;
3136 
3137     v9fs_string_init(&name);
3138     err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
3139     if (err < 0) {
3140         goto out_nofid;
3141     }
3142 
3143     if (name_is_illegal(name.data)) {
3144         err = -ENOENT;
3145         goto out_nofid;
3146     }
3147 
3148     if (!strcmp(".", name.data)) {
3149         err = -EINVAL;
3150         goto out_nofid;
3151     }
3152 
3153     if (!strcmp("..", name.data)) {
3154         err = -ENOTEMPTY;
3155         goto out_nofid;
3156     }
3157 
3158     if (flags & ~P9_DOTL_AT_REMOVEDIR) {
3159         err = -EINVAL;
3160         goto out_nofid;
3161     }
3162 
3163     if (flags & P9_DOTL_AT_REMOVEDIR) {
3164         rflags |= AT_REMOVEDIR;
3165     }
3166 
3167     dfidp = get_fid(pdu, dfid);
3168     if (dfidp == NULL) {
3169         err = -EINVAL;
3170         goto out_nofid;
3171     }
3172     /*
3173      * If the file is unlinked, we cannot reopen
3174      * it later, so don't reclaim the fd.
3175      */
3176     v9fs_path_init(&path);
3177     err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
3178     if (err < 0) {
3179         goto out_err;
3180     }
3181     err = v9fs_mark_fids_unreclaim(pdu, &path);
3182     if (err < 0) {
3183         goto out_err;
3184     }
3185     err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
3186     if (!err) {
3187         err = offset;
3188     }
3189 out_err:
3190     put_fid(pdu, dfidp);
3191     v9fs_path_free(&path);
3192 out_nofid:
3193     pdu_complete(pdu, err);
3194     v9fs_string_free(&name);
3195 }
3196 
3197 
3198 /* Only works with path name based fid */
3199 static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
3200                                              int32_t newdirfid,
3201                                              V9fsString *name)
3202 {
3203     int err = 0;
3204     V9fsPath new_path;
3205     V9fsFidState *tfidp;
3206     V9fsState *s = pdu->s;
3207     V9fsFidState *dirfidp = NULL;
3208 
3209     v9fs_path_init(&new_path);
3210     if (newdirfid != -1) {
3211         dirfidp = get_fid(pdu, newdirfid);
3212         if (dirfidp == NULL) {
3213             return -ENOENT;
3214         }
3215         if (fidp->fid_type != P9_FID_NONE) {
3216             err = -EINVAL;
3217             goto out;
3218         }
3219         err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
3220         if (err < 0) {
3221             goto out;
3222         }
3223     } else {
3224         char *dir_name = g_path_get_dirname(fidp->path.data);
3225         V9fsPath dir_path;
3226 
3227         v9fs_path_init(&dir_path);
3228         v9fs_path_sprintf(&dir_path, "%s", dir_name);
3229         g_free(dir_name);
3230 
3231         err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path);
3232         v9fs_path_free(&dir_path);
3233         if (err < 0) {
3234             goto out;
3235         }
3236     }
3237     err = v9fs_co_rename(pdu, &fidp->path, &new_path);
3238     if (err < 0) {
3239         goto out;
3240     }
3241     /*
3242      * Fix up fids pointing to the old name so that they
3243      * start pointing to the new name.
3244      */
3245     QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) {
3246         if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) {
3247             /* replace the name */
3248             v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data));
3249         }
3250     }
3251 out:
3252     if (dirfidp) {
3253         put_fid(pdu, dirfidp);
3254     }
3255     v9fs_path_free(&new_path);
3256     return err;
3257 }
3258 
3259 /* Only works with path name based fid */
3260 static void coroutine_fn v9fs_rename(void *opaque)
3261 {
3262     int32_t fid;
3263     ssize_t err = 0;
3264     size_t offset = 7;
3265     V9fsString name;
3266     int32_t newdirfid;
3267     V9fsFidState *fidp;
3268     V9fsPDU *pdu = opaque;
3269     V9fsState *s = pdu->s;
3270 
3271     v9fs_string_init(&name);
3272     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name);
3273     if (err < 0) {
3274         goto out_nofid;
3275     }
3276 
3277     if (name_is_illegal(name.data)) {
3278         err = -ENOENT;
3279         goto out_nofid;
3280     }
3281 
3282     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3283         err = -EISDIR;
3284         goto out_nofid;
3285     }
3286 
3287     fidp = get_fid(pdu, fid);
3288     if (fidp == NULL) {
3289         err = -ENOENT;
3290         goto out_nofid;
3291     }
3292     if (fidp->fid_type != P9_FID_NONE) {
3293         err = -EINVAL;
3294         goto out;
3295     }
3296     /* if fs driver is not path based, return EOPNOTSUPP */
3297     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3298         err = -EOPNOTSUPP;
3299         goto out;
3300     }
3301     v9fs_path_write_lock(s);
3302     err = v9fs_complete_rename(pdu, fidp, newdirfid, &name);
3303     v9fs_path_unlock(s);
3304     if (!err) {
3305         err = offset;
3306     }
3307 out:
3308     put_fid(pdu, fidp);
3309 out_nofid:
3310     pdu_complete(pdu, err);
3311     v9fs_string_free(&name);
3312 }
3313 
3314 static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
3315                                            V9fsString *old_name,
3316                                            V9fsPath *newdir,
3317                                            V9fsString *new_name)
3318 {
3319     V9fsFidState *tfidp;
3320     V9fsPath oldpath, newpath;
3321     V9fsState *s = pdu->s;
3322     int err;
3323 
3324     v9fs_path_init(&oldpath);
3325     v9fs_path_init(&newpath);
3326     err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath);
3327     if (err < 0) {
3328         goto out;
3329     }
3330     err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath);
3331     if (err < 0) {
3332         goto out;
3333     }
3334 
3335     /*
3336      * Fix up fids pointing to the old name so that they
3337      * start pointing to the new name.
3338      */
3339     QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) {
3340         if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) {
3341             /* replace the name */
3342             v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data));
3343         }
3344     }
3345 out:
3346     v9fs_path_free(&oldpath);
3347     v9fs_path_free(&newpath);
3348     return err;
3349 }
3350 
3351 static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
3352                                                V9fsString *old_name,
3353                                                int32_t newdirfid,
3354                                                V9fsString *new_name)
3355 {
3356     int err = 0;
3357     V9fsState *s = pdu->s;
3358     V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL;
3359 
3360     olddirfidp = get_fid(pdu, olddirfid);
3361     if (olddirfidp == NULL) {
3362         err = -ENOENT;
3363         goto out;
3364     }
3365     if (newdirfid != -1) {
3366         newdirfidp = get_fid(pdu, newdirfid);
3367         if (newdirfidp == NULL) {
3368             err = -ENOENT;
3369             goto out;
3370         }
3371     } else {
3372         newdirfidp = get_fid(pdu, olddirfid);
3373     }
3374 
3375     err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name,
3376                            &newdirfidp->path, new_name);
3377     if (err < 0) {
3378         goto out;
3379     }
3380     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
3381         /* Only for path-based fids do we need to do the fixup below */
3382         err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name,
3383                                  &newdirfidp->path, new_name);
3384     }
3385 out:
3386     if (olddirfidp) {
3387         put_fid(pdu, olddirfidp);
3388     }
3389     if (newdirfidp) {
3390         put_fid(pdu, newdirfidp);
3391     }
3392     return err;
3393 }
3394 
3395 static void coroutine_fn v9fs_renameat(void *opaque)
3396 {
3397     ssize_t err = 0;
3398     size_t offset = 7;
3399     V9fsPDU *pdu = opaque;
3400     V9fsState *s = pdu->s;
3401     int32_t olddirfid, newdirfid;
3402     V9fsString old_name, new_name;
3403 
3404     v9fs_string_init(&old_name);
3405     v9fs_string_init(&new_name);
3406     err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
3407                         &old_name, &newdirfid, &new_name);
3408     if (err < 0) {
3409         goto out_err;
3410     }
3411 
3412     if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
3413         err = -ENOENT;
3414         goto out_err;
3415     }
3416 
3417     if (!strcmp(".", old_name.data) || !strcmp("..", old_name.data) ||
3418         !strcmp(".", new_name.data) || !strcmp("..", new_name.data)) {
3419         err = -EISDIR;
3420         goto out_err;
3421     }
3422 
3423     v9fs_path_write_lock(s);
3424     err = v9fs_complete_renameat(pdu, olddirfid,
3425                                  &old_name, newdirfid, &new_name);
3426     v9fs_path_unlock(s);
3427     if (!err) {
3428         err = offset;
3429     }
3430 
3431 out_err:
3432     pdu_complete(pdu, err);
3433     v9fs_string_free(&old_name);
3434     v9fs_string_free(&new_name);
3435 }
3436 
3437 static void coroutine_fn v9fs_wstat(void *opaque)
3438 {
3439     int32_t fid;
3440     int err = 0;
3441     int16_t unused;
3442     V9fsStat v9stat;
3443     size_t offset = 7;
3444     struct stat stbuf;
3445     V9fsFidState *fidp;
3446     V9fsPDU *pdu = opaque;
3447     V9fsState *s = pdu->s;
3448 
3449     v9fs_stat_init(&v9stat);
3450     err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat);
3451     if (err < 0) {
3452         goto out_nofid;
3453     }
3454     trace_v9fs_wstat(pdu->tag, pdu->id, fid,
3455                      v9stat.mode, v9stat.atime, v9stat.mtime);
3456 
3457     fidp = get_fid(pdu, fid);
3458     if (fidp == NULL) {
3459         err = -EINVAL;
3460         goto out_nofid;
3461     }
3462     /* do we need to sync the file? */
3463     if (donttouch_stat(&v9stat)) {
3464         err = v9fs_co_fsync(pdu, fidp, 0);
3465         goto out;
3466     }
3467     if (v9stat.mode != -1) {
3468         uint32_t v9_mode;
3469         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
3470         if (err < 0) {
3471             goto out;
3472         }
3473         v9_mode = stat_to_v9mode(&stbuf);
3474         if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) !=
3475             (v9_mode & P9_STAT_MODE_TYPE_BITS)) {
3476             /* Attempting to change the type */
3477             err = -EIO;
3478             goto out;
3479         }
3480         err = v9fs_co_chmod(pdu, &fidp->path,
3481                             v9mode_to_mode(v9stat.mode,
3482                                            &v9stat.extension));
3483         if (err < 0) {
3484             goto out;
3485         }
3486     }
3487     if (v9stat.mtime != -1 || v9stat.atime != -1) {
3488         struct timespec times[2];
3489         if (v9stat.atime != -1) {
3490             times[0].tv_sec = v9stat.atime;
3491             times[0].tv_nsec = 0;
3492         } else {
3493             times[0].tv_nsec = UTIME_OMIT;
3494         }
3495         if (v9stat.mtime != -1) {
3496             times[1].tv_sec = v9stat.mtime;
3497             times[1].tv_nsec = 0;
3498         } else {
3499             times[1].tv_nsec = UTIME_OMIT;
3500         }
3501         err = v9fs_co_utimensat(pdu, &fidp->path, times);
3502         if (err < 0) {
3503             goto out;
3504         }
3505     }
3506     if (v9stat.n_gid != -1 || v9stat.n_uid != -1) {
3507         err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid);
3508         if (err < 0) {
3509             goto out;
3510         }
3511     }
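         /*
          * A non-empty name in Twstat requests a rename within the same
          * parent directory, hence the -1 (no new dirfid) passed below.
          */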
3512     if (v9stat.name.size != 0) {
3513         v9fs_path_write_lock(s);
3514         err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name);
3515         v9fs_path_unlock(s);
3516         if (err < 0) {
3517             goto out;
3518         }
3519     }
3520     if (v9stat.length != -1) {
3521         err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length);
3522         if (err < 0) {
3523             goto out;
3524         }
3525     }
3526     err = offset;
3527 out:
3528     put_fid(pdu, fidp);
3529 out_nofid:
3530     v9fs_stat_free(&v9stat);
3531     pdu_complete(pdu, err);
3532 }
3533 
3534 static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
3535 {
3536     uint32_t f_type;
3537     uint32_t f_bsize;
3538     uint64_t f_blocks;
3539     uint64_t f_bfree;
3540     uint64_t f_bavail;
3541     uint64_t f_files;
3542     uint64_t f_ffree;
3543     uint64_t fsid_val;
3544     uint32_t f_namelen;
3545     size_t offset = 7;
3546     int32_t bsize_factor;
3547 
3548     /*
3549      * compute bsize factor based on host file system block size
3550      * and client msize
3551      */
3552     bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize;
3553     if (!bsize_factor) {
3554         bsize_factor = 1;
3555     }
3556     f_type  = stbuf->f_type;
3557     f_bsize = stbuf->f_bsize;
3558     f_bsize *= bsize_factor;
3559     /*
3560      * f_bsize is adjusted (multiplied) by the bsize factor, so we need to
3561      * adjust (divide) the number of blocks, free blocks and available
3562      * blocks by the same factor
3563      */
3564     f_blocks = stbuf->f_blocks / bsize_factor;
3565     f_bfree  = stbuf->f_bfree / bsize_factor;
3566     f_bavail = stbuf->f_bavail / bsize_factor;
3567     f_files  = stbuf->f_files;
3568     f_ffree  = stbuf->f_ffree;
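         /*
          * Fold the two 32-bit words of the host's f_fsid into a single
          * 64-bit value for the 9P reply.
          */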
3569 #ifdef CONFIG_DARWIN
3570     fsid_val = (unsigned int)stbuf->f_fsid.val[0] |
3571                (unsigned long long)stbuf->f_fsid.val[1] << 32;
3572     f_namelen = NAME_MAX;
3573 #else
3574     fsid_val = (unsigned int) stbuf->f_fsid.__val[0] |
3575                (unsigned long long)stbuf->f_fsid.__val[1] << 32;
3576     f_namelen = stbuf->f_namelen;
3577 #endif
3578 
3579     return pdu_marshal(pdu, offset, "ddqqqqqqd",
3580                        f_type, f_bsize, f_blocks, f_bfree,
3581                        f_bavail, f_files, f_ffree,
3582                        fsid_val, f_namelen);
3583 }
3584 
3585 static void coroutine_fn v9fs_statfs(void *opaque)
3586 {
3587     int32_t fid;
3588     ssize_t retval = 0;
3589     size_t offset = 7;
3590     V9fsFidState *fidp;
3591     struct statfs stbuf;
3592     V9fsPDU *pdu = opaque;
3593     V9fsState *s = pdu->s;
3594 
3595     retval = pdu_unmarshal(pdu, offset, "d", &fid);
3596     if (retval < 0) {
3597         goto out_nofid;
3598     }
3599     fidp = get_fid(pdu, fid);
3600     if (fidp == NULL) {
3601         retval = -ENOENT;
3602         goto out_nofid;
3603     }
3604     retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
3605     if (retval < 0) {
3606         goto out;
3607     }
3608     retval = v9fs_fill_statfs(s, pdu, &stbuf);
3609     if (retval < 0) {
3610         goto out;
3611     }
3612     retval += offset;
3613 out:
3614     put_fid(pdu, fidp);
3615 out_nofid:
3616     pdu_complete(pdu, retval);
3617 }
3618 
3619 static void coroutine_fn v9fs_mknod(void *opaque)
3620 {
3621 
3622     int mode;
3623     gid_t gid;
3624     int32_t fid;
3625     V9fsQID qid;
3626     int err = 0;
3627     int major, minor;
3628     size_t offset = 7;
3629     V9fsString name;
3630     struct stat stbuf;
3631     V9fsFidState *fidp;
3632     V9fsPDU *pdu = opaque;
3633 
3634     v9fs_string_init(&name);
3635     err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
3636                         &major, &minor, &gid);
3637     if (err < 0) {
3638         goto out_nofid;
3639     }
3640     trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
3641 
3642     if (name_is_illegal(name.data)) {
3643         err = -ENOENT;
3644         goto out_nofid;
3645     }
3646 
3647     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3648         err = -EEXIST;
3649         goto out_nofid;
3650     }
3651 
3652     fidp = get_fid(pdu, fid);
3653     if (fidp == NULL) {
3654         err = -ENOENT;
3655         goto out_nofid;
3656     }
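         /*
          * Create the node in the directory referred to by fid, owned by the
          * fid's user and the client-supplied gid; major/minor are only
          * meaningful for device nodes.
          */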
3657     err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
3658                         makedev(major, minor), mode, &stbuf);
3659     if (err < 0) {
3660         goto out;
3661     }
3662     err = stat_to_qid(pdu, &stbuf, &qid);
3663     if (err < 0) {
3664         goto out;
3665     }
3666     err = pdu_marshal(pdu, offset, "Q", &qid);
3667     if (err < 0) {
3668         goto out;
3669     }
3670     err += offset;
3671     trace_v9fs_mknod_return(pdu->tag, pdu->id,
3672                             qid.type, qid.version, qid.path);
3673 out:
3674     put_fid(pdu, fidp);
3675 out_nofid:
3676     pdu_complete(pdu, err);
3677     v9fs_string_free(&name);
3678 }
3679 
3680 /*
3681  * Implement POSIX byte-range locking.
3682  * Server-side handling of locking is very simple, because the 9p server in
3683  * QEMU can handle only one client, and most of the lock handling
3684  * (conflict detection, merging, etc.) is done by the client's VFS layer
3685  * itself, so nothing needs to be done in the QEMU 9p server's lock code
3686  * path. Hence, when a TLOCK request comes in, always return success.
3687  */
3688 static void coroutine_fn v9fs_lock(void *opaque)
3689 {
3690     V9fsFlock flock;
3691     size_t offset = 7;
3692     struct stat stbuf;
3693     V9fsFidState *fidp;
3694     int32_t fid, err = 0;
3695     V9fsPDU *pdu = opaque;
3696 
3697     v9fs_string_init(&flock.client_id);
3698     err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type,
3699                         &flock.flags, &flock.start, &flock.length,
3700                         &flock.proc_id, &flock.client_id);
3701     if (err < 0) {
3702         goto out_nofid;
3703     }
3704     trace_v9fs_lock(pdu->tag, pdu->id, fid,
3705                     flock.type, flock.start, flock.length);
3706 
3707 
3708     /* We only support the block flag for now (and even that is currently ignored) */
3709     if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) {
3710         err = -EINVAL;
3711         goto out_nofid;
3712     }
3713     fidp = get_fid(pdu, fid);
3714     if (fidp == NULL) {
3715         err = -ENOENT;
3716         goto out_nofid;
3717     }
3718     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3719     if (err < 0) {
3720         goto out;
3721     }
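         /*
          * The fstat above only verifies that the fid still refers to an open
          * file; no host-side lock is taken (see the note at the top of this
          * function).
          */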
3722     err = pdu_marshal(pdu, offset, "b", P9_LOCK_SUCCESS);
3723     if (err < 0) {
3724         goto out;
3725     }
3726     err += offset;
3727     trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS);
3728 out:
3729     put_fid(pdu, fidp);
3730 out_nofid:
3731     pdu_complete(pdu, err);
3732     v9fs_string_free(&flock.client_id);
3733 }
3734 
3735 /*
3736  * When a TGETLOCK request comes in, always return success because all lock
3737  * handling is done by the client's VFS layer.
3738  */
3739 static void coroutine_fn v9fs_getlock(void *opaque)
3740 {
3741     size_t offset = 7;
3742     struct stat stbuf;
3743     V9fsFidState *fidp;
3744     V9fsGetlock glock;
3745     int32_t fid, err = 0;
3746     V9fsPDU *pdu = opaque;
3747 
3748     v9fs_string_init(&glock.client_id);
3749     err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type,
3750                         &glock.start, &glock.length, &glock.proc_id,
3751                         &glock.client_id);
3752     if (err < 0) {
3753         goto out_nofid;
3754     }
3755     trace_v9fs_getlock(pdu->tag, pdu->id, fid,
3756                        glock.type, glock.start, glock.length);
3757 
3758     fidp = get_fid(pdu, fid);
3759     if (fidp == NULL) {
3760         err = -ENOENT;
3761         goto out_nofid;
3762     }
3763     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3764     if (err < 0) {
3765         goto out;
3766     }
3767     glock.type = P9_LOCK_TYPE_UNLCK;
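         /* Always report the range as unlocked; the guest's VFS tracks locks itself. */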
3768     err = pdu_marshal(pdu, offset, "bqqds", glock.type,
3769                           glock.start, glock.length, glock.proc_id,
3770                           &glock.client_id);
3771     if (err < 0) {
3772         goto out;
3773     }
3774     err += offset;
3775     trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start,
3776                               glock.length, glock.proc_id);
3777 out:
3778     put_fid(pdu, fidp);
3779 out_nofid:
3780     pdu_complete(pdu, err);
3781     v9fs_string_free(&glock.client_id);
3782 }
3783 
3784 static void coroutine_fn v9fs_mkdir(void *opaque)
3785 {
3786     V9fsPDU *pdu = opaque;
3787     size_t offset = 7;
3788     int32_t fid;
3789     struct stat stbuf;
3790     V9fsQID qid;
3791     V9fsString name;
3792     V9fsFidState *fidp;
3793     gid_t gid;
3794     int mode;
3795     int err = 0;
3796 
3797     v9fs_string_init(&name);
3798     err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid);
3799     if (err < 0) {
3800         goto out_nofid;
3801     }
3802     trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid);
3803 
3804     if (name_is_illegal(name.data)) {
3805         err = -ENOENT;
3806         goto out_nofid;
3807     }
3808 
3809     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3810         err = -EEXIST;
3811         goto out_nofid;
3812     }
3813 
3814     fidp = get_fid(pdu, fid);
3815     if (fidp == NULL) {
3816         err = -ENOENT;
3817         goto out_nofid;
3818     }
3819     err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf);
3820     if (err < 0) {
3821         goto out;
3822     }
3823     err = stat_to_qid(pdu, &stbuf, &qid);
3824     if (err < 0) {
3825         goto out;
3826     }
3827     err = pdu_marshal(pdu, offset, "Q", &qid);
3828     if (err < 0) {
3829         goto out;
3830     }
3831     err += offset;
3832     trace_v9fs_mkdir_return(pdu->tag, pdu->id,
3833                             qid.type, qid.version, qid.path, err);
3834 out:
3835     put_fid(pdu, fidp);
3836 out_nofid:
3837     pdu_complete(pdu, err);
3838     v9fs_string_free(&name);
3839 }
3840 
3841 static void coroutine_fn v9fs_xattrwalk(void *opaque)
3842 {
3843     int64_t size;
3844     V9fsString name;
3845     ssize_t err = 0;
3846     size_t offset = 7;
3847     int32_t fid, newfid;
3848     V9fsFidState *file_fidp;
3849     V9fsFidState *xattr_fidp = NULL;
3850     V9fsPDU *pdu = opaque;
3851     V9fsState *s = pdu->s;
3852 
3853     v9fs_string_init(&name);
3854     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name);
3855     if (err < 0) {
3856         goto out_nofid;
3857     }
3858     trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data);
3859 
3860     file_fidp = get_fid(pdu, fid);
3861     if (file_fidp == NULL) {
3862         err = -ENOENT;
3863         goto out_nofid;
3864     }
3865     xattr_fidp = alloc_fid(s, newfid);
3866     if (xattr_fidp == NULL) {
3867         err = -EINVAL;
3868         goto out;
3869     }
3870     v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
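         /*
          * An empty name requests the full xattr name list (llistxattr); a
          * non-empty name requests a single attribute's value (lgetxattr).
          * Either way the data is buffered in the new xattr fid and later
          * retrieved by the client via Tread.
          */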
3871     if (!v9fs_string_size(&name)) {
3872         /*
3873          * listxattr request. Get the size first
3874          */
3875         size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0);
3876         if (size < 0) {
3877             err = size;
3878             clunk_fid(s, xattr_fidp->fid);
3879             goto out;
3880         }
3881         /*
3882          * Read the xattr value
3883          */
3884         xattr_fidp->fs.xattr.len = size;
3885         xattr_fidp->fid_type = P9_FID_XATTR;
3886         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3887         xattr_fidp->fs.xattr.value = g_malloc0(size);
3888         if (size) {
3889             err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
3890                                      xattr_fidp->fs.xattr.value,
3891                                      xattr_fidp->fs.xattr.len);
3892             if (err < 0) {
3893                 clunk_fid(s, xattr_fidp->fid);
3894                 goto out;
3895             }
3896         }
3897         err = pdu_marshal(pdu, offset, "q", size);
3898         if (err < 0) {
3899             goto out;
3900         }
3901         err += offset;
3902     } else {
3903         /*
3904          * Specific xattr fid. We check for the xattr's
3905          * presence and also collect its size.
3906          */
3907         size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3908                                  &name, NULL, 0);
3909         if (size < 0) {
3910             err = size;
3911             clunk_fid(s, xattr_fidp->fid);
3912             goto out;
3913         }
3914         /*
3915          * Read the xattr value
3916          */
3917         xattr_fidp->fs.xattr.len = size;
3918         xattr_fidp->fid_type = P9_FID_XATTR;
3919         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3920         xattr_fidp->fs.xattr.value = g_malloc0(size);
3921         if (size) {
3922             err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3923                                     &name, xattr_fidp->fs.xattr.value,
3924                                     xattr_fidp->fs.xattr.len);
3925             if (err < 0) {
3926                 clunk_fid(s, xattr_fidp->fid);
3927                 goto out;
3928             }
3929         }
3930         err = pdu_marshal(pdu, offset, "q", size);
3931         if (err < 0) {
3932             goto out;
3933         }
3934         err += offset;
3935     }
3936     trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size);
3937 out:
3938     put_fid(pdu, file_fidp);
3939     if (xattr_fidp) {
3940         put_fid(pdu, xattr_fidp);
3941     }
3942 out_nofid:
3943     pdu_complete(pdu, err);
3944     v9fs_string_free(&name);
3945 }
3946 
3947 #if defined(CONFIG_LINUX)
3948 /* Currently, only Linux has XATTR_SIZE_MAX */
3949 #define P9_XATTR_SIZE_MAX XATTR_SIZE_MAX
3950 #elif defined(CONFIG_DARWIN)
3951 /*
3952  * Darwin doesn't seem to define a maximum xattr size in its user
3953  * space header, so manually configure it across platforms as 64k.
3954  *
3955  * Having no limit at all can lead to QEMU crashing during large g_malloc()
3956  * calls. Because QEMU does not currently support macOS guests, the below
3957  * calls. Because QEMU does not currently support macOS guests, this
3958  * preliminary solution works only because it mirrors the limit applied to
3959  * Linux guests.
3960 #define P9_XATTR_SIZE_MAX 65536
3961 #else
3962 #error Missing definition for P9_XATTR_SIZE_MAX for this host system
3963 #endif
3964 
3965 static void coroutine_fn v9fs_xattrcreate(void *opaque)
3966 {
3967     int flags, rflags = 0;
3968     int32_t fid;
3969     uint64_t size;
3970     ssize_t err = 0;
3971     V9fsString name;
3972     size_t offset = 7;
3973     V9fsFidState *file_fidp;
3974     V9fsFidState *xattr_fidp;
3975     V9fsPDU *pdu = opaque;
3976 
3977     v9fs_string_init(&name);
3978     err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags);
3979     if (err < 0) {
3980         goto out_nofid;
3981     }
3982     trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
3983 
3984     if (flags & ~(P9_XATTR_CREATE | P9_XATTR_REPLACE)) {
3985         err = -EINVAL;
3986         goto out_nofid;
3987     }
3988 
3989     if (flags & P9_XATTR_CREATE) {
3990         rflags |= XATTR_CREATE;
3991     }
3992 
3993     if (flags & P9_XATTR_REPLACE) {
3994         rflags |= XATTR_REPLACE;
3995     }
3996 
3997     if (size > P9_XATTR_SIZE_MAX) {
3998         err = -E2BIG;
3999         goto out_nofid;
4000     }
4001 
4002     file_fidp = get_fid(pdu, fid);
4003     if (file_fidp == NULL) {
4004         err = -EINVAL;
4005         goto out_nofid;
4006     }
4007     if (file_fidp->fid_type != P9_FID_NONE) {
4008         err = -EINVAL;
4009         goto out_put_fid;
4010     }
4011 
4012     /* Make the file fid point to xattr */
4013     xattr_fidp = file_fidp;
4014     xattr_fidp->fid_type = P9_FID_XATTR;
4015     xattr_fidp->fs.xattr.copied_len = 0;
4016     xattr_fidp->fs.xattr.xattrwalk_fid = false;
4017     xattr_fidp->fs.xattr.len = size;
4018     xattr_fidp->fs.xattr.flags = rflags;
4019     v9fs_string_init(&xattr_fidp->fs.xattr.name);
4020     v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
4021     xattr_fidp->fs.xattr.value = g_malloc0(size);
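         /*
          * The attribute value itself arrives via subsequent Twrite requests
          * on this fid and is committed to the backend when the fid is
          * clunked.
          */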
4022     err = offset;
4023 out_put_fid:
4024     put_fid(pdu, file_fidp);
4025 out_nofid:
4026     pdu_complete(pdu, err);
4027     v9fs_string_free(&name);
4028 }
4029 
4030 static void coroutine_fn v9fs_readlink(void *opaque)
4031 {
4032     V9fsPDU *pdu = opaque;
4033     size_t offset = 7;
4034     V9fsString target;
4035     int32_t fid;
4036     int err = 0;
4037     V9fsFidState *fidp;
4038 
4039     err = pdu_unmarshal(pdu, offset, "d", &fid);
4040     if (err < 0) {
4041         goto out_nofid;
4042     }
4043     trace_v9fs_readlink(pdu->tag, pdu->id, fid);
4044     fidp = get_fid(pdu, fid);
4045     if (fidp == NULL) {
4046         err = -ENOENT;
4047         goto out_nofid;
4048     }
4049 
4050     v9fs_string_init(&target);
4051     err = v9fs_co_readlink(pdu, &fidp->path, &target);
4052     if (err < 0) {
4053         goto out;
4054     }
4055     err = pdu_marshal(pdu, offset, "s", &target);
4056     if (err < 0) {
4057         v9fs_string_free(&target);
4058         goto out;
4059     }
4060     err += offset;
4061     trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data);
4062     v9fs_string_free(&target);
4063 out:
4064     put_fid(pdu, fidp);
4065 out_nofid:
4066     pdu_complete(pdu, err);
4067 }
4068 
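     /*
      * Dispatch table indexed by the 9P request (T-message) type. Message
      * types without an entry (or with a NULL entry) are answered with
      * v9fs_op_not_supp() in pdu_submit().
      */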
4069 static CoroutineEntry *pdu_co_handlers[] = {
4070     [P9_TREADDIR] = v9fs_readdir,
4071     [P9_TSTATFS] = v9fs_statfs,
4072     [P9_TGETATTR] = v9fs_getattr,
4073     [P9_TSETATTR] = v9fs_setattr,
4074     [P9_TXATTRWALK] = v9fs_xattrwalk,
4075     [P9_TXATTRCREATE] = v9fs_xattrcreate,
4076     [P9_TMKNOD] = v9fs_mknod,
4077     [P9_TRENAME] = v9fs_rename,
4078     [P9_TLOCK] = v9fs_lock,
4079     [P9_TGETLOCK] = v9fs_getlock,
4080     [P9_TRENAMEAT] = v9fs_renameat,
4081     [P9_TREADLINK] = v9fs_readlink,
4082     [P9_TUNLINKAT] = v9fs_unlinkat,
4083     [P9_TMKDIR] = v9fs_mkdir,
4084     [P9_TVERSION] = v9fs_version,
4085     [P9_TLOPEN] = v9fs_open,
4086     [P9_TATTACH] = v9fs_attach,
4087     [P9_TSTAT] = v9fs_stat,
4088     [P9_TWALK] = v9fs_walk,
4089     [P9_TCLUNK] = v9fs_clunk,
4090     [P9_TFSYNC] = v9fs_fsync,
4091     [P9_TOPEN] = v9fs_open,
4092     [P9_TREAD] = v9fs_read,
4093 #if 0
4094     [P9_TAUTH] = v9fs_auth,
4095 #endif
4096     [P9_TFLUSH] = v9fs_flush,
4097     [P9_TLINK] = v9fs_link,
4098     [P9_TSYMLINK] = v9fs_symlink,
4099     [P9_TCREATE] = v9fs_create,
4100     [P9_TLCREATE] = v9fs_lcreate,
4101     [P9_TWRITE] = v9fs_write,
4102     [P9_TWSTAT] = v9fs_wstat,
4103     [P9_TREMOVE] = v9fs_remove,
4104 };
4105 
4106 static void coroutine_fn v9fs_op_not_supp(void *opaque)
4107 {
4108     V9fsPDU *pdu = opaque;
4109     pdu_complete(pdu, -EOPNOTSUPP);
4110 }
4111 
4112 static void coroutine_fn v9fs_fs_ro(void *opaque)
4113 {
4114     V9fsPDU *pdu = opaque;
4115     pdu_complete(pdu, -EROFS);
4116 }
4117 
4118 static inline bool is_read_only_op(V9fsPDU *pdu)
4119 {
4120     switch (pdu->id) {
4121     case P9_TREADDIR:
4122     case P9_TSTATFS:
4123     case P9_TGETATTR:
4124     case P9_TXATTRWALK:
4125     case P9_TLOCK:
4126     case P9_TGETLOCK:
4127     case P9_TREADLINK:
4128     case P9_TVERSION:
4129     case P9_TLOPEN:
4130     case P9_TATTACH:
4131     case P9_TSTAT:
4132     case P9_TWALK:
4133     case P9_TCLUNK:
4134     case P9_TFSYNC:
4135     case P9_TOPEN:
4136     case P9_TREAD:
4137     case P9_TAUTH:
4138     case P9_TFLUSH:
4139         return true;
4140     default:
4141         return false;
4142     }
4143 }
4144 
4145 void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr)
4146 {
4147     Coroutine *co;
4148     CoroutineEntry *handler;
4149     V9fsState *s = pdu->s;
4150 
4151     pdu->size = le32_to_cpu(hdr->size_le);
4152     pdu->id = hdr->id;
4153     pdu->tag = le16_to_cpu(hdr->tag_le);
4154 
4155     if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
4156         (pdu_co_handlers[pdu->id] == NULL)) {
4157         handler = v9fs_op_not_supp;
4158     } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
4159         handler = v9fs_fs_ro;
4160     } else {
4161         handler = pdu_co_handlers[pdu->id];
4162     }
4163 
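         /*
          * Each request is processed in its own coroutine so that blocking
          * backend operations can yield back to the main loop.
          */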
4164     qemu_co_queue_init(&pdu->complete);
4165     co = qemu_coroutine_create(handler, pdu);
4166     qemu_coroutine_enter(co);
4167 }
4168 
4169 /* Returns 0 on success, 1 on failure. */
4170 int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
4171                                Error **errp)
4172 {
4173     ERRP_GUARD();
4174     int i, len;
4175     struct stat stat;
4176     FsDriverEntry *fse;
4177     V9fsPath path;
4178     int rc = 1;
4179 
4180     assert(!s->transport);
4181     s->transport = t;
4182 
4183     /* initialize pdu allocator */
4184     QLIST_INIT(&s->free_list);
4185     QLIST_INIT(&s->active_list);
4186     for (i = 0; i < MAX_REQ; i++) {
4187         QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
4188         s->pdus[i].s = s;
4189         s->pdus[i].idx = i;
4190     }
4191 
4192     v9fs_path_init(&path);
4193 
4194     fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
4195 
4196     if (!fse) {
4197         /* We don't have a fsdev identified by fsdev_id */
4198         /* We don't have an fsdev identified by fsdev_id */
4199                    "id = %s",
4200                    s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
4201         goto out;
4202     }
4203 
4204     if (!s->fsconf.tag) {
4205         /* we haven't specified a mount_tag */
4206         error_setg(errp, "fsdev with id %s needs mount_tag arguments",
4207                    s->fsconf.fsdev_id);
4208         goto out;
4209     }
4210 
4211     s->ctx.export_flags = fse->export_flags;
4212     s->ctx.fs_root = g_strdup(fse->path);
4213     s->ctx.exops.get_st_gen = NULL;
4214     len = strlen(s->fsconf.tag);
4215     if (len > MAX_TAG_LEN - 1) {
4216         error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
4217                    "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
4218         goto out;
4219     }
4220 
4221     s->tag = g_strdup(s->fsconf.tag);
4222     s->ctx.uid = -1;
4223 
4224     s->ops = fse->ops;
4225 
4226     s->ctx.fmode = fse->fmode;
4227     s->ctx.dmode = fse->dmode;
4228 
4229     QSIMPLEQ_INIT(&s->fid_list);
4230     qemu_co_rwlock_init(&s->rename_lock);
4231 
4232     if (s->ops->init(&s->ctx, errp) < 0) {
4233         error_prepend(errp, "cannot initialize fsdev '%s': ",
4234                       s->fsconf.fsdev_id);
4235         goto out;
4236     }
4237 
4238     /*
4239      * Check details of the export path. We need to use the fs driver
4240      * callback to do that. Since we are in the init path, we don't
4241      * use coroutines here.
4242      */
4243     if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
4244         error_setg(errp,
4245                    "error in converting name to path %s", strerror(errno));
4246         goto out;
4247     }
4248     if (s->ops->lstat(&s->ctx, &path, &stat)) {
4249         error_setg(errp, "share path %s does not exist", fse->path);
4250         goto out;
4251     } else if (!S_ISDIR(stat.st_mode)) {
4252         error_setg(errp, "share path %s is not a directory", fse->path);
4253         goto out;
4254     }
4255 
4256     s->dev_id = stat.st_dev;
4257 
4258     /* init inode remapping: */
4259     /* hash table for variable length inode suffixes */
4260     qpd_table_init(&s->qpd_table);
4261     /* hash table for slow/full inode remapping (most users won't need it) */
4262     qpf_table_init(&s->qpf_table);
4263     /* hash table for quick inode remapping */
4264     qpp_table_init(&s->qpp_table);
4265     s->qp_ndevices = 0;
4266     s->qp_affix_next = 1; /* reserve 0 to detect overflow */
4267     s->qp_fullpath_next = 1;
4268 
4269     s->ctx.fst = &fse->fst;
4270     fsdev_throttle_init(s->ctx.fst);
4271 
4272     rc = 0;
4273 out:
4274     if (rc) {
4275         v9fs_device_unrealize_common(s);
4276     }
4277     v9fs_path_free(&path);
4278     return rc;
4279 }
4280 
4281 void v9fs_device_unrealize_common(V9fsState *s)
4282 {
4283     if (s->ops && s->ops->cleanup) {
4284         s->ops->cleanup(&s->ctx);
4285     }
4286     if (s->ctx.fst) {
4287         fsdev_throttle_cleanup(s->ctx.fst);
4288     }
4289     g_free(s->tag);
4290     qp_table_destroy(&s->qpd_table);
4291     qp_table_destroy(&s->qpp_table);
4292     qp_table_destroy(&s->qpf_table);
4293     g_free(s->ctx.fs_root);
4294 }
4295 
4296 typedef struct VirtfsCoResetData {
4297     V9fsPDU pdu;
4298     bool done;
4299 } VirtfsCoResetData;
4300 
4301 static void coroutine_fn virtfs_co_reset(void *opaque)
4302 {
4303     VirtfsCoResetData *data = opaque;
4304 
4305     virtfs_reset(&data->pdu);
4306     data->done = true;
4307 }
4308 
4309 void v9fs_reset(V9fsState *s)
4310 {
4311     VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
4312     Coroutine *co;
4313 
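         /*
          * Drain all in-flight requests first, then perform the reset (which
          * clunks every fid) from coroutine context.
          */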
4314     while (!QLIST_EMPTY(&s->active_list)) {
4315         aio_poll(qemu_get_aio_context(), true);
4316     }
4317 
4318     co = qemu_coroutine_create(virtfs_co_reset, &data);
4319     qemu_coroutine_enter(co);
4320 
4321     while (!data.done) {
4322         aio_poll(qemu_get_aio_context(), true);
4323     }
4324 }
4325 
4326 static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
4327 {
4328     struct rlimit rlim;
4329     if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
4330         error_report("Failed to get the resource limit");
4331         exit(1);
4332     }
4333     open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3);
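         /*
          * open_fd_hw is the high-water mark of open host fds above which the
          * server starts reclaiming fds, leaving min(400, 1/3 of the soft
          * limit) as headroom; open_fd_rc caps how many fds a single reclaim
          * pass may close.
          */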
4334     open_fd_rc = rlim.rlim_cur / 2;
4335 }
4336