xref: /openbmc/qemu/hw/9pfs/9p.c (revision 8c6631e6)
1 /*
2  * Virtio 9p backend
3  *
4  * Copyright IBM, Corp. 2010
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 /*
15  * Not so fast! You might want to read the 9p developer docs first:
16  * https://wiki.qemu.org/Documentation/9p
17  */
18 
19 #include "qemu/osdep.h"
20 #ifdef CONFIG_LINUX
21 #include <linux/limits.h>
22 #endif
23 #include <glib/gprintf.h>
24 #include "hw/virtio/virtio.h"
25 #include "qapi/error.h"
26 #include "qemu/error-report.h"
27 #include "qemu/iov.h"
28 #include "qemu/main-loop.h"
29 #include "qemu/sockets.h"
30 #include "virtio-9p.h"
31 #include "fsdev/qemu-fsdev.h"
32 #include "9p-xattr.h"
33 #include "9p-util.h"
34 #include "coth.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qemu/xxhash.h"
38 #include <math.h>
39 
40 int open_fd_hw;
41 int total_open_fd;
42 static int open_fd_rc;
43 
44 enum {
45     Oread   = 0x00,
46     Owrite  = 0x01,
47     Ordwr   = 0x02,
48     Oexec   = 0x03,
49     Oexcl   = 0x04,
50     Otrunc  = 0x10,
51     Orexec  = 0x20,
52     Orclose = 0x40,
53     Oappend = 0x80,
54 };
55 
56 P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free);
57 
58 static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
59 {
60     ssize_t ret;
61     va_list ap;
62 
63     va_start(ap, fmt);
64     ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
65     va_end(ap);
66 
67     return ret;
68 }
69 
70 static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
71 {
72     ssize_t ret;
73     va_list ap;
74 
75     va_start(ap, fmt);
76     ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
77     va_end(ap);
78 
79     return ret;
80 }
81 
82 static int omode_to_uflags(int8_t mode)
83 {
84     int ret = 0;
85 
86     switch (mode & 3) {
87     case Oread:
88         ret = O_RDONLY;
89         break;
90     case Ordwr:
91         ret = O_RDWR;
92         break;
93     case Owrite:
94         ret = O_WRONLY;
95         break;
96     case Oexec:
97         ret = O_RDONLY;
98         break;
99     }
100 
101     if (mode & Otrunc) {
102         ret |= O_TRUNC;
103     }
104 
105     if (mode & Oappend) {
106         ret |= O_APPEND;
107     }
108 
109     if (mode & Oexcl) {
110         ret |= O_EXCL;
111     }
112 
113     return ret;
114 }
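/*
 * Illustrative example: a 9P2000.u open mode of (Owrite | Otrunc) maps to
 * O_WRONLY | O_TRUNC on the host side, and (Oread | Oappend) maps to
 * O_RDONLY | O_APPEND.
 */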
115 
116 typedef struct DotlOpenflagMap {
117     int dotl_flag;
118     int open_flag;
119 } DotlOpenflagMap;
120 
121 static int dotl_to_open_flags(int flags)
122 {
123     int i;
124     /*
125      * P9_DOTL_READONLY, P9_DOTL_WRONLY and P9_DOTL_NOACCESS use the same
126      * bit values as the host access mode bits, so O_ACCMODE applies directly.
127      */
128     int oflags = flags & O_ACCMODE;
129 
130     DotlOpenflagMap dotl_oflag_map[] = {
131         { P9_DOTL_CREATE, O_CREAT },
132         { P9_DOTL_EXCL, O_EXCL },
133         { P9_DOTL_NOCTTY , O_NOCTTY },
134         { P9_DOTL_TRUNC, O_TRUNC },
135         { P9_DOTL_APPEND, O_APPEND },
136         { P9_DOTL_NONBLOCK, O_NONBLOCK } ,
137         { P9_DOTL_DSYNC, O_DSYNC },
138         { P9_DOTL_FASYNC, FASYNC },
139 #ifndef CONFIG_DARWIN
140         { P9_DOTL_NOATIME, O_NOATIME },
141         /*
142          *  On Darwin, we could map to F_NOCACHE, which is
143          *  similar, but doesn't quite have the same
144          *  semantics. However, we don't support O_DIRECT
145      *  even on Linux at the moment, so we just ignore
146          *  it here.
147          */
148         { P9_DOTL_DIRECT, O_DIRECT },
149 #endif
150         { P9_DOTL_LARGEFILE, O_LARGEFILE },
151         { P9_DOTL_DIRECTORY, O_DIRECTORY },
152         { P9_DOTL_NOFOLLOW, O_NOFOLLOW },
153         { P9_DOTL_SYNC, O_SYNC },
154     };
155 
156     for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
157         if (flags & dotl_oflag_map[i].dotl_flag) {
158             oflags |= dotl_oflag_map[i].open_flag;
159         }
160     }
161 
162     return oflags;
163 }
164 
165 void cred_init(FsCred *credp)
166 {
167     credp->fc_uid = -1;
168     credp->fc_gid = -1;
169     credp->fc_mode = -1;
170     credp->fc_rdev = -1;
171 }
172 
173 static int get_dotl_openflags(V9fsState *s, int oflags)
174 {
175     int flags;
176     /*
177      * Filter the client open flags
178      */
179     flags = dotl_to_open_flags(oflags);
180     flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT);
181 #ifndef CONFIG_DARWIN
182     /*
183      * Ignore direct disk access hint until the server supports it.
184      */
185     flags &= ~O_DIRECT;
186 #endif
187     return flags;
188 }
189 
190 void v9fs_path_init(V9fsPath *path)
191 {
192     path->data = NULL;
193     path->size = 0;
194 }
195 
196 void v9fs_path_free(V9fsPath *path)
197 {
198     g_free(path->data);
199     path->data = NULL;
200     path->size = 0;
201 }
202 
203 
204 void G_GNUC_PRINTF(2, 3)
205 v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
206 {
207     va_list ap;
208 
209     v9fs_path_free(path);
210 
211     va_start(ap, fmt);
212     /* Bump the size to include the terminating NULL */
213     path->size = g_vasprintf(&path->data, fmt, ap) + 1;
214     va_end(ap);
215 }
216 
217 void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src)
218 {
219     v9fs_path_free(dst);
220     dst->size = src->size;
221     dst->data = g_memdup(src->data, src->size);
222 }
223 
224 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
225                       const char *name, V9fsPath *path)
226 {
227     int err;
228     err = s->ops->name_to_path(&s->ctx, dirpath, name, path);
229     if (err < 0) {
230         err = -errno;
231     }
232     return err;
233 }
234 
235 /*
236  * Return TRUE if s1 is an ancestor of s2.
237  *
238  * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
239  * As a special case, we treat s1 as an ancestor of s2 if they are the same.
240  */
241 static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2)
242 {
243     if (!strncmp(s1->data, s2->data, s1->size - 1)) {
244         if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') {
245             return 1;
246         }
247     }
248     return 0;
249 }
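/*
 * Note: the comparison above relies on V9fsPath.size including the
 * terminating NUL, i.e. s1->size - 1 == strlen(s1->data), which
 * v9fs_path_sprintf() and v9fs_path_copy() maintain.
 */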
250 
251 static size_t v9fs_string_size(V9fsString *str)
252 {
253     return str->size;
254 }
255 
256 /*
257  * Returns 0 if the fid got re-opened, 1 if not, < 0 on error.
258  */
259 static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
260 {
261     int err = 1;
262     if (f->fid_type == P9_FID_FILE) {
263         if (f->fs.fd == -1) {
264             do {
265                 err = v9fs_co_open(pdu, f, f->open_flags);
266             } while (err == -EINTR && !pdu->cancelled);
267         }
268     } else if (f->fid_type == P9_FID_DIR) {
269         if (f->fs.dir.stream == NULL) {
270             do {
271                 err = v9fs_co_opendir(pdu, f);
272             } while (err == -EINTR && !pdu->cancelled);
273         }
274     }
275     return err;
276 }
277 
278 static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
279 {
280     int err;
281     V9fsFidState *f;
282     V9fsState *s = pdu->s;
283 
284     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
285     if (f) {
286         BUG_ON(f->clunked);
287         /*
288          * Update the fid ref upfront so that
289          * we don't get reclaimed when we yield
290          * in open later.
291          */
292         f->ref++;
293         /*
294          * check whether we need to reopen the
295          * file. We might have closed the fd
296          * while trying to free up some file
297          * descriptors.
298          */
299         err = v9fs_reopen_fid(pdu, f);
300         if (err < 0) {
301             f->ref--;
302             return NULL;
303         }
304         /*
305          * Mark the fid as referenced so that the LRU
306          * reclaim won't close the file descriptor
307          */
308         f->flags |= FID_REFERENCED;
309         return f;
310     }
311     return NULL;
312 }
313 
314 static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
315 {
316     V9fsFidState *f;
317 
318     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
319     if (f) {
320         /* If the fid is already there, return NULL */
321         BUG_ON(f->clunked);
322         return NULL;
323     }
324     f = g_new0(V9fsFidState, 1);
325     f->fid = fid;
326     f->fid_type = P9_FID_NONE;
327     f->ref = 1;
328     /*
329      * Mark the fid as referenced so that the LRU
330      * reclaim won't close the file descriptor
331      */
332     f->flags |= FID_REFERENCED;
333     g_hash_table_insert(s->fids, GINT_TO_POINTER(fid), f);
334 
335     v9fs_readdir_init(s->proto_version, &f->fs.dir);
336     v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir);
337 
338     return f;
339 }
340 
341 static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
342 {
343     int retval = 0;
344 
345     if (fidp->fs.xattr.xattrwalk_fid) {
346         /* getxattr/listxattr fid */
347         goto free_value;
348     }
349     /*
350      * If this is a fid for setxattr, the clunk should
351      * result in a local setxattr call.
352      */
353     if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
354         /* clunk after partial write */
355         retval = -EINVAL;
356         goto free_out;
357     }
358     if (fidp->fs.xattr.len) {
359         retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
360                                    fidp->fs.xattr.value,
361                                    fidp->fs.xattr.len,
362                                    fidp->fs.xattr.flags);
363     } else {
364         retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
365     }
366 free_out:
367     v9fs_string_free(&fidp->fs.xattr.name);
368 free_value:
369     g_free(fidp->fs.xattr.value);
370     return retval;
371 }
372 
373 static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
374 {
375     int retval = 0;
376 
377     if (fidp->fid_type == P9_FID_FILE) {
378         /* If we reclaimed the fd, there is no need to close it */
379         if (fidp->fs.fd != -1) {
380             retval = v9fs_co_close(pdu, &fidp->fs);
381         }
382     } else if (fidp->fid_type == P9_FID_DIR) {
383         if (fidp->fs.dir.stream != NULL) {
384             retval = v9fs_co_closedir(pdu, &fidp->fs);
385         }
386     } else if (fidp->fid_type == P9_FID_XATTR) {
387         retval = v9fs_xattr_fid_clunk(pdu, fidp);
388     }
389     v9fs_path_free(&fidp->path);
390     g_free(fidp);
391     return retval;
392 }
393 
394 static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
395 {
396     BUG_ON(!fidp->ref);
397     fidp->ref--;
398     /*
399      * Don't free the fid if it is in the reclaim list
400      */
401     if (!fidp->ref && fidp->clunked) {
402         if (fidp->fid == pdu->s->root_fid) {
403             /*
404              * If the clunked fid is the root fid, the client has
405              * unmounted the fs, so delete the migration blocker.
406              * Ideally, this should be hooked to a transport close
407              * notification.
408              */
409             if (pdu->s->migration_blocker) {
410                 migrate_del_blocker(pdu->s->migration_blocker);
411                 error_free(pdu->s->migration_blocker);
412                 pdu->s->migration_blocker = NULL;
413             }
414         }
415         return free_fid(pdu, fidp);
416     }
417     return 0;
418 }
419 
420 static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
421 {
422     V9fsFidState *fidp;
423 
424     /* TODO: Use g_hash_table_steal_extended() instead? */
425     fidp = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
426     if (fidp) {
427         g_hash_table_remove(s->fids, GINT_TO_POINTER(fid));
428         fidp->clunked = true;
429         return fidp;
430     }
431     return NULL;
432 }
433 
434 void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
435 {
436     int reclaim_count = 0;
437     V9fsState *s = pdu->s;
438     V9fsFidState *f;
439     GHashTableIter iter;
440     gpointer fid;
441 
442     g_hash_table_iter_init(&iter, s->fids);
443 
444     QSLIST_HEAD(, V9fsFidState) reclaim_list =
445         QSLIST_HEAD_INITIALIZER(reclaim_list);
446 
447     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) {
448         /*
449          * Fids marked non-reclaimable (e.g. because their path was
450          * unlinked) cannot be reclaimed; skip them. Also skip
451          * fids currently being operated on.
452          */
453         if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
454             continue;
455         }
456         /*
457          * If it is a recently referenced fid,
458          * we leave the fid untouched and just clear the
459          * reference bit; we come back to it in a
460          * later iteration (a simple LRU without
461          * moving list elements around).
462          */
463         if (f->flags & FID_REFERENCED) {
464             f->flags &= ~FID_REFERENCED;
465             continue;
466         }
467         /*
468          * Add fids to reclaim list.
469          */
470         if (f->fid_type == P9_FID_FILE) {
471             if (f->fs.fd != -1) {
472                 /*
473                  * Up the reference count so that
474                  * a clunk request won't free this fid
475                  */
476                 f->ref++;
477                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
478                 f->fs_reclaim.fd = f->fs.fd;
479                 f->fs.fd = -1;
480                 reclaim_count++;
481             }
482         } else if (f->fid_type == P9_FID_DIR) {
483             if (f->fs.dir.stream != NULL) {
484                 /*
485                  * Up the reference count so that
486                  * a clunk request won't free this fid
487                  */
488                 f->ref++;
489                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
490                 f->fs_reclaim.dir.stream = f->fs.dir.stream;
491                 f->fs.dir.stream = NULL;
492                 reclaim_count++;
493             }
494         }
495         if (reclaim_count >= open_fd_rc) {
496             break;
497         }
498     }
499     /*
500      * Now close the fids in the reclaim list, and free them if
501      * they are already clunked.
502      */
503     while (!QSLIST_EMPTY(&reclaim_list)) {
504         f = QSLIST_FIRST(&reclaim_list);
505         QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next);
506         if (f->fid_type == P9_FID_FILE) {
507             v9fs_co_close(pdu, &f->fs_reclaim);
508         } else if (f->fid_type == P9_FID_DIR) {
509             v9fs_co_closedir(pdu, &f->fs_reclaim);
510         }
511         /*
512          * Now drop the fid reference, free it
513          * if clunked.
514          */
515         put_fid(pdu, f);
516     }
517 }
518 
519 /*
520  * This is used when a path is removed from the directory tree. Any
521  * fids that still reference it must not be closed from then on, since
522  * they cannot be reopened.
523  */
524 static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
525 {
526     int err = 0;
527     V9fsState *s = pdu->s;
528     V9fsFidState *fidp;
529     gpointer fid;
530     GHashTableIter iter;
531     /*
532      * The most common case is probably that we have exactly one
533      * fid for the given path, so preallocate exactly one.
534      */
535     g_autoptr(GArray) to_reopen = g_array_sized_new(FALSE, FALSE,
536             sizeof(V9fsFidState *), 1);
537     gint i;
538 
539     g_hash_table_iter_init(&iter, s->fids);
540 
541     /*
542      * We iterate over the fid table looking for the entries we need
543      * to reopen, and store them in to_reopen. This is because
544      * v9fs_reopen_fid() and put_fid() yield. This allows the fid table
545      * to be modified in the meantime, invalidating our iterator.
546      */
547     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &fidp)) {
548         if (fidp->path.size == path->size &&
549             !memcmp(fidp->path.data, path->data, path->size)) {
550             /*
551              * Ensure the fid survives a potential clunk request during
552              * v9fs_reopen_fid or put_fid.
553              */
554             fidp->ref++;
555             fidp->flags |= FID_NON_RECLAIMABLE;
556             g_array_append_val(to_reopen, fidp);
557         }
558     }
559 
560     for (i = 0; i < to_reopen->len; i++) {
561         fidp = g_array_index(to_reopen, V9fsFidState*, i);
562         /* reopen the file/dir if already closed */
563         err = v9fs_reopen_fid(pdu, fidp);
564         if (err < 0) {
565             break;
566         }
567     }
568 
569     for (i = 0; i < to_reopen->len; i++) {
570         put_fid(pdu, g_array_index(to_reopen, V9fsFidState*, i));
571     }
572     return err;
573 }
574 
575 static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
576 {
577     V9fsState *s = pdu->s;
578     V9fsFidState *fidp;
579     GList *freeing;
580     /*
581      * Get a list of all the values (fid states) in the table, which
582      * we then...
583      */
584     g_autoptr(GList) fids = g_hash_table_get_values(s->fids);
585 
586     /* ... remove from the table, taking over ownership. */
587     g_hash_table_steal_all(s->fids);
588 
589     /*
590      * This allows us to release our references to them asynchronously without
591      * iterating over the hash table and risking iterator invalidation
592      * through concurrent modifications.
593      */
594     for (freeing = fids; freeing; freeing = freeing->next) {
595         fidp = freeing->data;
596         fidp->ref++;
597         fidp->clunked = true;
598         put_fid(pdu, fidp);
599     }
600 }
601 
602 #define P9_QID_TYPE_DIR         0x80
603 #define P9_QID_TYPE_SYMLINK     0x02
604 
605 #define P9_STAT_MODE_DIR        0x80000000
606 #define P9_STAT_MODE_APPEND     0x40000000
607 #define P9_STAT_MODE_EXCL       0x20000000
608 #define P9_STAT_MODE_MOUNT      0x10000000
609 #define P9_STAT_MODE_AUTH       0x08000000
610 #define P9_STAT_MODE_TMP        0x04000000
611 #define P9_STAT_MODE_SYMLINK    0x02000000
612 #define P9_STAT_MODE_LINK       0x01000000
613 #define P9_STAT_MODE_DEVICE     0x00800000
614 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
615 #define P9_STAT_MODE_SOCKET     0x00100000
616 #define P9_STAT_MODE_SETUID     0x00080000
617 #define P9_STAT_MODE_SETGID     0x00040000
618 #define P9_STAT_MODE_SETVTX     0x00010000
619 
620 #define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR |          \
621                                 P9_STAT_MODE_SYMLINK |      \
622                                 P9_STAT_MODE_LINK |         \
623                                 P9_STAT_MODE_DEVICE |       \
624                                 P9_STAT_MODE_NAMED_PIPE |   \
625                                 P9_STAT_MODE_SOCKET)
626 
627 /* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
628 static inline uint8_t mirror8bit(uint8_t byte)
629 {
630     return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
631 }
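/*
 * Implementation note: this is the classic 3-operation byte reversal trick
 * (64-bit multiply, mask, modulo 2^10 - 1): the multiply creates several
 * shifted copies of the byte, the mask selects the bits that correspond to
 * the reversed bit order, and the modulo folds the selected bits back into
 * a single byte.
 */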
632 
633 /* Same as mirror8bit(), just for a 64 bit data type instead of a byte. */
634 static inline uint64_t mirror64bit(uint64_t value)
635 {
636     return ((uint64_t)mirror8bit(value         & 0xff) << 56) |
637            ((uint64_t)mirror8bit((value >> 8)  & 0xff) << 48) |
638            ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) |
639            ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) |
640            ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) |
641            ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) |
642            ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8)  |
643            ((uint64_t)mirror8bit((value >> 56) & 0xff));
644 }
645 
646 /*
647  * Parameter k for the Exponential Golomb algorithm to be used.
648  *
649  * The smaller this value, the smaller the minimum bit count of the
650  * generated Exp. Golomb affixes (at the lowest index), at the price of a
651  * higher maximum bit count of generated affixes (at the highest index).
652  * Likewise, increasing this parameter yields a smaller maximum bit count
653  * at the price of a higher minimum bit count.
654  *
655  * In practice that means: a good value for k depends on the expected number
656  * of devices to be exposed by one export. For a small number of devices k
657  * should be small, for a large number of devices k might be increased
658  * instead. The default of k=0 should be fine for most users though.
659  *
660  * IMPORTANT: In case this ever becomes a runtime parameter, the value of
661  * k must not change as long as the guest is still running, because that
662  * would cause completely different inode numbers to be generated on the guest.
663  */
664 #define EXP_GOLOMB_K    0
665 
666 /**
667  * expGolombEncode() - Exponential Golomb algorithm for arbitrary k
668  *                     (including k=0).
669  *
670  * @n: natural number (or index) of the prefix to be generated
671  *     (1, 2, 3, ...)
672  * @k: parameter k of Exp. Golomb algorithm to be used
673  *     (see comment on EXP_GOLOMB_K macro for details about k)
674  * Return: prefix for given @n and @k
675  *
676  * The Exponential Golomb algorithm generates prefixes (NOT suffixes!)
677  * with growing length and with the mathematical property of being
678  * "prefix-free". The latter means the generated prefixes can be prepended
679  * in front of arbitrary numbers and the resulting concatenated numbers are
680  * guaranteed to be always unique.
681  *
682  * This is a minor adjustment to the original Exp. Golomb algorithm in the
683  * sense that the lowest allowed index (@n) starts at 1, not at zero.
684  */
685 static VariLenAffix expGolombEncode(uint64_t n, int k)
686 {
687     const uint64_t value = n + (1 << k) - 1;
688     const int bits = (int) log2(value) + 1;
689     return (VariLenAffix) {
690         .type = AffixType_Prefix,
691         .value = value,
692         .bits = bits + MAX((bits - 1 - k), 0)
693     };
694 }
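/*
 * Illustrative values (computed from the formula above) for the default
 * EXP_GOLOMB_K of 0: n=1 -> value 1, 1 bit; n=2 -> value 2, 3 bits;
 * n=3 -> value 3, 3 bits; n=4 -> value 4, 5 bits. In other words, each
 * doubling of the index costs two additional affix bits.
 */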
695 
696 /**
697  * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix.
698  * @affix: either suffix or prefix to be inverted
699  * Return: inversion of passed @affix
700  *
701  * Simply mirror all bits of the affix value, in order to preserve the
702  * mathematical "prefix-free" respectively "suffix-free" property after
703  * the conversion.
704  *
705  * If a passed prefix is suitable to create unique numbers, then the
706  * returned suffix is suitable to create unique numbers as well (and vice
707  * versa).
708  */
709 static VariLenAffix invertAffix(const VariLenAffix *affix)
710 {
711     return (VariLenAffix) {
712         .type =
713             (affix->type == AffixType_Suffix) ?
714                 AffixType_Prefix : AffixType_Suffix,
715         .value =
716             mirror64bit(affix->value) >>
717             ((sizeof(affix->value) * 8) - affix->bits),
718         .bits = affix->bits
719     };
720 }
721 
722 /**
723  * affixForIndex() - Generates suffix numbers with "suffix-free" property.
724  * @index: natural number (or index) of the suffix to be generated
725  *         (1, 2, 3, ...)
726  * Return: Suffix suitable to assemble unique number.
727  *
728  * This is just a wrapper function on top of the Exp. Golomb algorithm.
729  *
730  * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes,
731  * this function converts the Exp. Golomb prefixes into appropriate suffixes
732  * which are still suitable for generating unique numbers.
733  */
734 static VariLenAffix affixForIndex(uint64_t index)
735 {
736     VariLenAffix prefix;
737     prefix = expGolombEncode(index, EXP_GOLOMB_K);
738     return invertAffix(&prefix); /* convert prefix to suffix */
739 }
740 
741 /* creative abuse of tb_hash_func7, which is based on xxhash */
742 static uint32_t qpp_hash(QppEntry e)
743 {
744     return qemu_xxhash7(e.ino_prefix, e.dev, 0, 0, 0);
745 }
746 
747 static uint32_t qpf_hash(QpfEntry e)
748 {
749     return qemu_xxhash7(e.ino, e.dev, 0, 0, 0);
750 }
751 
752 static bool qpd_cmp_func(const void *obj, const void *userp)
753 {
754     const QpdEntry *e1 = obj, *e2 = userp;
755     return e1->dev == e2->dev;
756 }
757 
758 static bool qpp_cmp_func(const void *obj, const void *userp)
759 {
760     const QppEntry *e1 = obj, *e2 = userp;
761     return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
762 }
763 
764 static bool qpf_cmp_func(const void *obj, const void *userp)
765 {
766     const QpfEntry *e1 = obj, *e2 = userp;
767     return e1->dev == e2->dev && e1->ino == e2->ino;
768 }
769 
770 static void qp_table_remove(void *p, uint32_t h, void *up)
771 {
772     g_free(p);
773 }
774 
775 static void qp_table_destroy(struct qht *ht)
776 {
777     if (!ht || !ht->map) {
778         return;
779     }
780     qht_iter(ht, qp_table_remove, NULL);
781     qht_destroy(ht);
782 }
783 
784 static void qpd_table_init(struct qht *ht)
785 {
786     qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
787 }
788 
789 static void qpp_table_init(struct qht *ht)
790 {
791     qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
792 }
793 
794 static void qpf_table_init(struct qht *ht)
795 {
796     qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
797 }
798 
799 /*
800  * Returns how many (high end) bits of inode numbers of the passed fs
801  * device shall be used (in combination with the device number) to
802  * generate hash values for qpp_table entries.
803  *
804  * This function is required if variable length suffixes are used for inode
805  * number mapping on guest level. Since a device may end up having multiple
806  * entries in qpp_table, each entry most probably with a different suffix
807  * length, we thus need this function in conjunction with qpd_table to
808  * "agree" on a fixed number of bits (per device) to always be used for
809  * generating the hash values used to access qpp_table, in order to get
810  * consistent behaviour.
811  */
812 static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev)
813 {
814     QpdEntry lookup = {
815         .dev = dev
816     }, *val;
817     uint32_t hash = dev;
818     VariLenAffix affix;
819 
820     val = qht_lookup(&pdu->s->qpd_table, &lookup, hash);
821     if (!val) {
822         val = g_new0(QpdEntry, 1);
823         *val = lookup;
824         affix = affixForIndex(pdu->s->qp_affix_next);
825         val->prefix_bits = affix.bits;
826         qht_insert(&pdu->s->qpd_table, val, hash, NULL);
827         pdu->s->qp_ndevices++;
828     }
829     return val->prefix_bits;
830 }
831 
832 /*
833  * Slow / full mapping host inode nr -> guest inode nr.
834  *
835  * This function performs a slower and much more costly remapping of an
836  * original file inode number on host to an appropriate different inode
837  * number on guest. For every (dev, inode) combination on host a new
838  * sequential number is generated, cached and exposed as inode number on
839  * guest.
840  *
841  * This is just a "last resort" fallback solution if the much faster/cheaper
842  * qid_path_suffixmap() failed. In practice this slow / full mapping is not
843  * expected ever to be used at all though.
844  *
845  * See qid_path_suffixmap() for details
846  *
847  */
848 static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
849                             uint64_t *path)
850 {
851     QpfEntry lookup = {
852         .dev = stbuf->st_dev,
853         .ino = stbuf->st_ino
854     }, *val;
855     uint32_t hash = qpf_hash(lookup);
856     VariLenAffix affix;
857 
858     val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
859 
860     if (!val) {
861         if (pdu->s->qp_fullpath_next == 0) {
862             /* no more files can be mapped :'( */
863             error_report_once(
864                 "9p: No more prefixes available for remapping inodes from "
865                 "host to guest."
866             );
867             return -ENFILE;
868         }
869 
870         val = g_new0(QpfEntry, 1);
871         *val = lookup;
872 
873         /* new unique inode and device combo */
874         affix = affixForIndex(
875             1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
876         );
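        /*
         * The index used here is one past the largest value the affix
         * counter can ever reach, so the resulting affix is distinct from
         * every suffix handed out by qid_path_suffixmap(); by the
         * prefix-free/suffix-free property, full-map inode numbers therefore
         * cannot collide with suffix-mapped ones.
         */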
877         val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
878         pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
879         qht_insert(&pdu->s->qpf_table, val, hash, NULL);
880     }
881 
882     *path = val->path;
883     return 0;
884 }
885 
886 /*
887  * Quick mapping host inode nr -> guest inode nr.
888  *
889  * This function performs quick remapping of an original file inode number
890  * on host to an appropriate different inode number on guest. This remapping
891  * of inodes is required to avoid inode nr collisions on guest which would
892  * happen if the 9p export contains more than 1 exported file system (or
893  * more than 1 file system data set), because unlike on host level where the
894  * files would have different device nrs, all files exported by 9p would
895  * share the same device nr on guest (the device nr of the virtual 9p device
896  * that is).
897  *
898  * Inode remapping is performed by chopping off high end bits of the original
899  * inode number from host, shifting the result upwards and then assigning a
900  * generated suffix number for the low end bits, where the same suffix number
901  * will be shared by all inodes with the same device id AND the same high end
902  * bits that have been chopped off. That approach utilizes the fact that inode
903  * numbers very likely share the same high end bits (i.e. due to their common
904  * sequential generation by file systems) and hence we only have to generate
905  * and track a very limited amount of suffixes in practice due to that.
906  * and track only a very limited number of suffixes in practice.
907  * We generate variable size suffixes for that purpose. The 1st generated
908  * suffix will only have 1 bit and hence we only need to chop off 1 bit from
909  * the original inode number. The subsequent suffixes being generated will
910  * grow in (bit) size subsequently, i.e. the 2nd and 3rd suffix being
911  * generated will have 3 bits and hence we have to chop off 3 bits from their
912  * original inodes, and so on. That approach of using variable length suffixes
913  * original inodes, and so on. This approach of variable length suffixes
914  * (rather than fixed size ones) utilizes the fact that in practice only a
915  * very limited number of devices are shared by the same export (typically
916  * less than two dozen devices per 9p export), so we need to chop off fewer
917  * bits than with fixed size prefixes, yet we remain flexible to add new
918  * devices below the host's export directory at runtime, without having to
919  * reboot the guest or reconfigure it. And because only a very limited
920  * number of original high end bits are chopped off that way, the total
921  * number of suffixes we need to generate is smaller than with fixed size
922  * prefixes, which improves the performance of the inode remapping
923  * algorithm and has the nice side effect that the inode numbers on the
924  * guest will be much smaller & human friendly. ;-)
 */
925 static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf,
926                               uint64_t *path)
927 {
928     const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev);
929     QppEntry lookup = {
930         .dev = stbuf->st_dev,
931         .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits))
932     }, *val;
933     uint32_t hash = qpp_hash(lookup);
934 
935     val = qht_lookup(&pdu->s->qpp_table, &lookup, hash);
936 
937     if (!val) {
938         if (pdu->s->qp_affix_next == 0) {
939             /* we ran out of affixes */
940             warn_report_once(
941                 "9p: Potential degraded performance of inode remapping"
942             );
943             return -ENFILE;
944         }
945 
946         val = g_new0(QppEntry, 1);
947         *val = lookup;
948 
949         /* new unique inode affix and device combo */
950         val->qp_affix_index = pdu->s->qp_affix_next++;
951         val->qp_affix = affixForIndex(val->qp_affix_index);
952         qht_insert(&pdu->s->qpp_table, val, hash, NULL);
953     }
954     /* assuming generated affix to be suffix type, not prefix */
955     *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;
956     return 0;
957 }
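/*
 * Worked example (hypothetical numbers): if the affix counter starts at 1,
 * the first (device, inode prefix) combination gets the 1-bit suffix '1',
 * so a host inode number of 42 maps to the guest inode number
 * (42 << 1) | 1 = 85. The second combination gets the 3-bit suffix 0b010,
 * mapping host inode 42 to (42 << 3) | 2 = 338.
 */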
958 
959 static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
960 {
961     int err;
962     size_t size;
963 
964     if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
965         /* map inode+device to qid path (fast path) */
966         err = qid_path_suffixmap(pdu, stbuf, &qidp->path);
967         if (err == -ENFILE) {
968             /* fast path didn't work, fall back to full map */
969             err = qid_path_fullmap(pdu, stbuf, &qidp->path);
970         }
971         if (err) {
972             return err;
973         }
974     } else {
975         if (pdu->s->dev_id != stbuf->st_dev) {
976             if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) {
977                 error_report_once(
978                     "9p: Multiple devices detected in same VirtFS export. "
979                     "Access of guest to additional devices is (partly) "
980                     "denied due to virtfs option 'multidevs=forbid' being "
981                     "effective."
982                 );
983                 return -ENODEV;
984             } else {
985                 warn_report_once(
986                     "9p: Multiple devices detected in same VirtFS export, "
987                     "which might lead to file ID collisions and severe "
988                     "misbehaviours on guest! You should either use a "
989                     "separate export for each device shared from host or "
990                     "use virtfs option 'multidevs=remap'!"
991                 );
992             }
993         }
994         memset(&qidp->path, 0, sizeof(qidp->path));
995         size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path));
996         memcpy(&qidp->path, &stbuf->st_ino, size);
997     }
998 
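    /*
     * The qid version field is used by clients for cache validation; a cheap
     * heuristic derived from mtime and file size serves that purpose here.
     */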
999     qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8);
1000     qidp->type = 0;
1001     if (S_ISDIR(stbuf->st_mode)) {
1002         qidp->type |= P9_QID_TYPE_DIR;
1003     }
1004     if (S_ISLNK(stbuf->st_mode)) {
1005         qidp->type |= P9_QID_TYPE_SYMLINK;
1006     }
1007 
1008     return 0;
1009 }
1010 
1011 V9fsPDU *pdu_alloc(V9fsState *s)
1012 {
1013     V9fsPDU *pdu = NULL;
1014 
1015     if (!QLIST_EMPTY(&s->free_list)) {
1016         pdu = QLIST_FIRST(&s->free_list);
1017         QLIST_REMOVE(pdu, next);
1018         QLIST_INSERT_HEAD(&s->active_list, pdu, next);
1019     }
1020     return pdu;
1021 }
1022 
1023 void pdu_free(V9fsPDU *pdu)
1024 {
1025     V9fsState *s = pdu->s;
1026 
1027     g_assert(!pdu->cancelled);
1028     QLIST_REMOVE(pdu, next);
1029     QLIST_INSERT_HEAD(&s->free_list, pdu, next);
1030 }
1031 
1032 static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
1033 {
1034     int8_t id = pdu->id + 1; /* Response */
1035     V9fsState *s = pdu->s;
1036     int ret;
1037 
1038     /*
1039      * The 9p spec requires that successfully cancelled pdus receive no reply.
1040      * Sending a reply would confuse clients because they would
1041      * assume that any EINTR is the actual result of the operation,
1042      * rather than a consequence of the cancellation. However, if
1043      * the operation completed (successfully or with an error other
1044      * than one caused by the cancellation), we do send out that reply, both
1045      * for efficiency and to avoid confusing the rest of the state machine
1046      * that assumes passing a non-error here will mean a successful
1047      * transmission of the reply.
1048      */
1049     bool discard = pdu->cancelled && len == -EINTR;
1050     if (discard) {
1051         trace_v9fs_rcancel(pdu->tag, pdu->id);
1052         pdu->size = 0;
1053         goto out_notify;
1054     }
1055 
1056     if (len < 0) {
1057         int err = -len;
1058         len = 7;
1059 
1060         if (s->proto_version != V9FS_PROTO_2000L) {
1061             V9fsString str;
1062 
1063             str.data = strerror(err);
1064             str.size = strlen(str.data);
1065 
1066             ret = pdu_marshal(pdu, len, "s", &str);
1067             if (ret < 0) {
1068                 goto out_notify;
1069             }
1070             len += ret;
1071             id = P9_RERROR;
1072         } else {
1073             err = errno_to_dotl(err);
1074         }
1075 
1076         ret = pdu_marshal(pdu, len, "d", err);
1077         if (ret < 0) {
1078             goto out_notify;
1079         }
1080         len += ret;
1081 
1082         if (s->proto_version == V9FS_PROTO_2000L) {
1083             id = P9_RLERROR;
1084         }
1085         trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
1086     }
1087 
1088     /* fill out the header */
1089     if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) {
1090         goto out_notify;
1091     }
1092 
1093     /* keep these in sync */
1094     pdu->size = len;
1095     pdu->id = id;
1096 
1097 out_notify:
1098     pdu->s->transport->push_and_notify(pdu);
1099 
1100     /* Now wake up anybody waiting in flush for this request */
1101     if (!qemu_co_queue_next(&pdu->complete)) {
1102         pdu_free(pdu);
1103     }
1104 }
1105 
1106 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
1107 {
1108     mode_t ret;
1109 
1110     ret = mode & 0777;
1111     if (mode & P9_STAT_MODE_DIR) {
1112         ret |= S_IFDIR;
1113     }
1114 
1115     if (mode & P9_STAT_MODE_SYMLINK) {
1116         ret |= S_IFLNK;
1117     }
1118     if (mode & P9_STAT_MODE_SOCKET) {
1119         ret |= S_IFSOCK;
1120     }
1121     if (mode & P9_STAT_MODE_NAMED_PIPE) {
1122         ret |= S_IFIFO;
1123     }
1124     if (mode & P9_STAT_MODE_DEVICE) {
1125         if (extension->size && extension->data[0] == 'c') {
1126             ret |= S_IFCHR;
1127         } else {
1128             ret |= S_IFBLK;
1129         }
1130     }
1131 
1132     if (!(ret & ~0777)) {
1133         ret |= S_IFREG;
1134     }
1135 
1136     if (mode & P9_STAT_MODE_SETUID) {
1137         ret |= S_ISUID;
1138     }
1139     if (mode & P9_STAT_MODE_SETGID) {
1140         ret |= S_ISGID;
1141     }
1142     if (mode & P9_STAT_MODE_SETVTX) {
1143         ret |= S_ISVTX;
1144     }
1145 
1146     return ret;
1147 }
1148 
1149 static int donttouch_stat(V9fsStat *stat)
1150 {
1151     if (stat->type == -1 &&
1152         stat->dev == -1 &&
1153         stat->qid.type == 0xff &&
1154         stat->qid.version == (uint32_t) -1 &&
1155         stat->qid.path == (uint64_t) -1 &&
1156         stat->mode == -1 &&
1157         stat->atime == -1 &&
1158         stat->mtime == -1 &&
1159         stat->length == -1 &&
1160         !stat->name.size &&
1161         !stat->uid.size &&
1162         !stat->gid.size &&
1163         !stat->muid.size &&
1164         stat->n_uid == -1 &&
1165         stat->n_gid == -1 &&
1166         stat->n_muid == -1) {
1167         return 1;
1168     }
1169 
1170     return 0;
1171 }
1172 
1173 static void v9fs_stat_init(V9fsStat *stat)
1174 {
1175     v9fs_string_init(&stat->name);
1176     v9fs_string_init(&stat->uid);
1177     v9fs_string_init(&stat->gid);
1178     v9fs_string_init(&stat->muid);
1179     v9fs_string_init(&stat->extension);
1180 }
1181 
1182 static void v9fs_stat_free(V9fsStat *stat)
1183 {
1184     v9fs_string_free(&stat->name);
1185     v9fs_string_free(&stat->uid);
1186     v9fs_string_free(&stat->gid);
1187     v9fs_string_free(&stat->muid);
1188     v9fs_string_free(&stat->extension);
1189 }
1190 
1191 static uint32_t stat_to_v9mode(const struct stat *stbuf)
1192 {
1193     uint32_t mode;
1194 
1195     mode = stbuf->st_mode & 0777;
1196     if (S_ISDIR(stbuf->st_mode)) {
1197         mode |= P9_STAT_MODE_DIR;
1198     }
1199 
1200     if (S_ISLNK(stbuf->st_mode)) {
1201         mode |= P9_STAT_MODE_SYMLINK;
1202     }
1203 
1204     if (S_ISSOCK(stbuf->st_mode)) {
1205         mode |= P9_STAT_MODE_SOCKET;
1206     }
1207 
1208     if (S_ISFIFO(stbuf->st_mode)) {
1209         mode |= P9_STAT_MODE_NAMED_PIPE;
1210     }
1211 
1212     if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
1213         mode |= P9_STAT_MODE_DEVICE;
1214     }
1215 
1216     if (stbuf->st_mode & S_ISUID) {
1217         mode |= P9_STAT_MODE_SETUID;
1218     }
1219 
1220     if (stbuf->st_mode & S_ISGID) {
1221         mode |= P9_STAT_MODE_SETGID;
1222     }
1223 
1224     if (stbuf->st_mode & S_ISVTX) {
1225         mode |= P9_STAT_MODE_SETVTX;
1226     }
1227 
1228     return mode;
1229 }
1230 
1231 static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path,
1232                                        const char *basename,
1233                                        const struct stat *stbuf,
1234                                        V9fsStat *v9stat)
1235 {
1236     int err;
1237 
1238     memset(v9stat, 0, sizeof(*v9stat));
1239 
1240     err = stat_to_qid(pdu, stbuf, &v9stat->qid);
1241     if (err < 0) {
1242         return err;
1243     }
1244     v9stat->mode = stat_to_v9mode(stbuf);
1245     v9stat->atime = stbuf->st_atime;
1246     v9stat->mtime = stbuf->st_mtime;
1247     v9stat->length = stbuf->st_size;
1248 
1249     v9fs_string_free(&v9stat->uid);
1250     v9fs_string_free(&v9stat->gid);
1251     v9fs_string_free(&v9stat->muid);
1252 
1253     v9stat->n_uid = stbuf->st_uid;
1254     v9stat->n_gid = stbuf->st_gid;
1255     v9stat->n_muid = 0;
1256 
1257     v9fs_string_free(&v9stat->extension);
1258 
1259     if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
1260         err = v9fs_co_readlink(pdu, path, &v9stat->extension);
1261         if (err < 0) {
1262             return err;
1263         }
1264     } else if (v9stat->mode & P9_STAT_MODE_DEVICE) {
1265         v9fs_string_sprintf(&v9stat->extension, "%c %u %u",
1266                 S_ISCHR(stbuf->st_mode) ? 'c' : 'b',
1267                 major(stbuf->st_rdev), minor(stbuf->st_rdev));
1268     } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) {
1269         v9fs_string_sprintf(&v9stat->extension, "%s %lu",
1270                 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink);
1271     }
1272 
1273     v9fs_string_sprintf(&v9stat->name, "%s", basename);
1274 
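    /*
     * 61 bytes is the fixed-size part of the 9P2000.u stat record excluding
     * the leading size field itself: 2 (type) + 4 (dev) + 13 (qid) + 4 (mode)
     * + 4 (atime) + 4 (mtime) + 8 (length) + 5 * 2 (string length prefixes)
     * + 3 * 4 (n_uid/n_gid/n_muid); the variable-length strings are added
     * below.
     */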
1275     v9stat->size = 61 +
1276         v9fs_string_size(&v9stat->name) +
1277         v9fs_string_size(&v9stat->uid) +
1278         v9fs_string_size(&v9stat->gid) +
1279         v9fs_string_size(&v9stat->muid) +
1280         v9fs_string_size(&v9stat->extension);
1281     return 0;
1282 }
1283 
1284 #define P9_STATS_MODE          0x00000001ULL
1285 #define P9_STATS_NLINK         0x00000002ULL
1286 #define P9_STATS_UID           0x00000004ULL
1287 #define P9_STATS_GID           0x00000008ULL
1288 #define P9_STATS_RDEV          0x00000010ULL
1289 #define P9_STATS_ATIME         0x00000020ULL
1290 #define P9_STATS_MTIME         0x00000040ULL
1291 #define P9_STATS_CTIME         0x00000080ULL
1292 #define P9_STATS_INO           0x00000100ULL
1293 #define P9_STATS_SIZE          0x00000200ULL
1294 #define P9_STATS_BLOCKS        0x00000400ULL
1295 
1296 #define P9_STATS_BTIME         0x00000800ULL
1297 #define P9_STATS_GEN           0x00001000ULL
1298 #define P9_STATS_DATA_VERSION  0x00002000ULL
1299 
1300 #define P9_STATS_BASIC         0x000007ffULL /* Mask for fields up to BLOCKS */
1301 #define P9_STATS_ALL           0x00003fffULL /* Mask for All fields above */
1302 
1303 
1304 /**
1305  * blksize_to_iounit() - Block size exposed to 9p client.
1306  * Return: block size
1307  *
1308  * @pdu: 9p client request
1309  * @blksize: host filesystem's block size
1310  *
1311  * Convert host filesystem's block size into an appropriate block size for
1312  * 9p client (guest OS side). The value returned suggests an "optimum" block
1313  * size for 9p I/O, i.e. to maximize performance.
1314  */
1315 static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize)
1316 {
1317     int32_t iounit = 0;
1318     V9fsState *s = pdu->s;
1319 
1320     /*
1321      * iounit should be a multiple of blksize (the host filesystem block
1322      * size) and no larger than (client msize - P9_IOHDRSZ)
1323      */
1324     if (blksize) {
1325         iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize);
1326     }
1327     if (!iounit) {
1328         iounit = s->msize - P9_IOHDRSZ;
1329     }
1330     return iounit;
1331 }
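/*
 * Numeric example (assuming P9_IOHDRSZ is the usual 24-byte I/O header):
 * with a client msize of 128 KiB and a host block size of 4096, iounit
 * becomes QEMU_ALIGN_DOWN(131072 - 24, 4096) = 126976 bytes.
 */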
1332 
1333 static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf)
1334 {
1335     return blksize_to_iounit(pdu, stbuf->st_blksize);
1336 }
1337 
1338 static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf,
1339                                 V9fsStatDotl *v9lstat)
1340 {
1341     memset(v9lstat, 0, sizeof(*v9lstat));
1342 
1343     v9lstat->st_mode = stbuf->st_mode;
1344     v9lstat->st_nlink = stbuf->st_nlink;
1345     v9lstat->st_uid = stbuf->st_uid;
1346     v9lstat->st_gid = stbuf->st_gid;
1347     v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev);
1348     v9lstat->st_size = stbuf->st_size;
1349     v9lstat->st_blksize = stat_to_iounit(pdu, stbuf);
1350     v9lstat->st_blocks = stbuf->st_blocks;
1351     v9lstat->st_atime_sec = stbuf->st_atime;
1352     v9lstat->st_mtime_sec = stbuf->st_mtime;
1353     v9lstat->st_ctime_sec = stbuf->st_ctime;
1354 #ifdef CONFIG_DARWIN
1355     v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec;
1356     v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec;
1357     v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec;
1358 #else
1359     v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
1360     v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
1361     v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
1362 #endif
1363     /* Currently we only support BASIC fields in stat */
1364     v9lstat->st_result_mask = P9_STATS_BASIC;
1365 
1366     return stat_to_qid(pdu, stbuf, &v9lstat->qid);
1367 }
1368 
1369 static void print_sg(struct iovec *sg, int cnt)
1370 {
1371     int i;
1372 
1373     printf("sg[%d]: {", cnt);
1374     for (i = 0; i < cnt; i++) {
1375         if (i) {
1376             printf(", ");
1377         }
1378         printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
1379     }
1380     printf("}\n");
1381 }
1382 
1383 /* This is called only for path name based fids */
1384 static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
1385 {
1386     V9fsPath str;
1387     v9fs_path_init(&str);
1388     v9fs_path_copy(&str, dst);
1389     v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
1390     v9fs_path_free(&str);
1391 }
1392 
1393 static inline bool is_ro_export(FsContext *ctx)
1394 {
1395     return ctx->export_flags & V9FS_RDONLY;
1396 }
1397 
1398 static void coroutine_fn v9fs_version(void *opaque)
1399 {
1400     ssize_t err;
1401     V9fsPDU *pdu = opaque;
1402     V9fsState *s = pdu->s;
1403     V9fsString version;
1404     size_t offset = 7;
1405 
1406     v9fs_string_init(&version);
1407     err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
1408     if (err < 0) {
1409         goto out;
1410     }
1411     trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
1412 
1413     virtfs_reset(pdu);
1414 
1415     if (!strcmp(version.data, "9P2000.u")) {
1416         s->proto_version = V9FS_PROTO_2000U;
1417     } else if (!strcmp(version.data, "9P2000.L")) {
1418         s->proto_version = V9FS_PROTO_2000L;
1419     } else {
1420         v9fs_string_sprintf(&version, "unknown");
1421         /* skip min. msize check, reporting invalid version has priority */
1422         goto marshal;
1423     }
1424 
1425     if (s->msize < P9_MIN_MSIZE) {
1426         err = -EMSGSIZE;
1427         error_report(
1428             "9pfs: Client requested msize < minimum msize ("
1429             stringify(P9_MIN_MSIZE) ") supported by this server."
1430         );
1431         goto out;
1432     }
1433 
1434     /* 8192 is the default msize of Linux clients */
1435     if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) {
1436         warn_report_once(
1437             "9p: degraded performance: a reasonably high msize should be "
1438             "chosen on client/guest side (chosen msize is <= 8192). See "
1439             "https://wiki.qemu.org/Documentation/9psetup#msize for details."
1440         );
1441     }
1442 
1443 marshal:
1444     err = pdu_marshal(pdu, offset, "ds", s->msize, &version);
1445     if (err < 0) {
1446         goto out;
1447     }
1448     err += offset;
1449     trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data);
1450 out:
1451     pdu_complete(pdu, err);
1452     v9fs_string_free(&version);
1453 }
1454 
1455 static void coroutine_fn v9fs_attach(void *opaque)
1456 {
1457     V9fsPDU *pdu = opaque;
1458     V9fsState *s = pdu->s;
1459     int32_t fid, afid, n_uname;
1460     V9fsString uname, aname;
1461     V9fsFidState *fidp;
1462     size_t offset = 7;
1463     V9fsQID qid;
1464     ssize_t err;
1465     struct stat stbuf;
1466 
1467     v9fs_string_init(&uname);
1468     v9fs_string_init(&aname);
1469     err = pdu_unmarshal(pdu, offset, "ddssd", &fid,
1470                         &afid, &uname, &aname, &n_uname);
1471     if (err < 0) {
1472         goto out_nofid;
1473     }
1474     trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data);
1475 
1476     fidp = alloc_fid(s, fid);
1477     if (fidp == NULL) {
1478         err = -EINVAL;
1479         goto out_nofid;
1480     }
1481     fidp->uid = n_uname;
1482     err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path);
1483     if (err < 0) {
1484         err = -EINVAL;
1485         clunk_fid(s, fid);
1486         goto out;
1487     }
1488     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1489     if (err < 0) {
1490         err = -EINVAL;
1491         clunk_fid(s, fid);
1492         goto out;
1493     }
1494     err = stat_to_qid(pdu, &stbuf, &qid);
1495     if (err < 0) {
1496         err = -EINVAL;
1497         clunk_fid(s, fid);
1498         goto out;
1499     }
1500 
1501     /*
1502      * Disable migration if we haven't done so already.
1503      * attach could get called multiple times for the same export.
1504      */
1505     if (!s->migration_blocker) {
1506         error_setg(&s->migration_blocker,
1507                    "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
1508                    s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
1509         err = migrate_add_blocker(s->migration_blocker, NULL);
1510         if (err < 0) {
1511             error_free(s->migration_blocker);
1512             s->migration_blocker = NULL;
1513             clunk_fid(s, fid);
1514             goto out;
1515         }
1516         s->root_fid = fid;
1517     }
1518 
1519     err = pdu_marshal(pdu, offset, "Q", &qid);
1520     if (err < 0) {
1521         clunk_fid(s, fid);
1522         goto out;
1523     }
1524     err += offset;
1525 
1526     memcpy(&s->root_st, &stbuf, sizeof(stbuf));
1527     trace_v9fs_attach_return(pdu->tag, pdu->id,
1528                              qid.type, qid.version, qid.path);
1529 out:
1530     put_fid(pdu, fidp);
1531 out_nofid:
1532     pdu_complete(pdu, err);
1533     v9fs_string_free(&uname);
1534     v9fs_string_free(&aname);
1535 }
1536 
1537 static void coroutine_fn v9fs_stat(void *opaque)
1538 {
1539     int32_t fid;
1540     V9fsStat v9stat;
1541     ssize_t err = 0;
1542     size_t offset = 7;
1543     struct stat stbuf;
1544     V9fsFidState *fidp;
1545     V9fsPDU *pdu = opaque;
1546     char *basename;
1547 
1548     err = pdu_unmarshal(pdu, offset, "d", &fid);
1549     if (err < 0) {
1550         goto out_nofid;
1551     }
1552     trace_v9fs_stat(pdu->tag, pdu->id, fid);
1553 
1554     fidp = get_fid(pdu, fid);
1555     if (fidp == NULL) {
1556         err = -ENOENT;
1557         goto out_nofid;
1558     }
1559     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1560     if (err < 0) {
1561         goto out;
1562     }
1563     basename = g_path_get_basename(fidp->path.data);
1564     err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat);
1565     g_free(basename);
1566     if (err < 0) {
1567         goto out;
1568     }
1569     err = pdu_marshal(pdu, offset, "wS", 0, &v9stat);
1570     if (err < 0) {
1571         v9fs_stat_free(&v9stat);
1572         goto out;
1573     }
1574     trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode,
1575                            v9stat.atime, v9stat.mtime, v9stat.length);
1576     err += offset;
1577     v9fs_stat_free(&v9stat);
1578 out:
1579     put_fid(pdu, fidp);
1580 out_nofid:
1581     pdu_complete(pdu, err);
1582 }
1583 
1584 static void coroutine_fn v9fs_getattr(void *opaque)
1585 {
1586     int32_t fid;
1587     size_t offset = 7;
1588     ssize_t retval = 0;
1589     struct stat stbuf;
1590     V9fsFidState *fidp;
1591     uint64_t request_mask;
1592     V9fsStatDotl v9stat_dotl;
1593     V9fsPDU *pdu = opaque;
1594 
1595     retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask);
1596     if (retval < 0) {
1597         goto out_nofid;
1598     }
1599     trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask);
1600 
1601     fidp = get_fid(pdu, fid);
1602     if (fidp == NULL) {
1603         retval = -ENOENT;
1604         goto out_nofid;
1605     }
1606     /*
1607      * Currently we only support BASIC fields in stat, so there is no
1608      * need to look at request_mask.
1609      */
1610     retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1611     if (retval < 0) {
1612         goto out;
1613     }
1614     retval = stat_to_v9stat_dotl(pdu, &stbuf, &v9stat_dotl);
1615     if (retval < 0) {
1616         goto out;
1617     }
1618 
1619     /*  fill st_gen if requested and supported by underlying fs */
1620     if (request_mask & P9_STATS_GEN) {
1621         retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
1622         switch (retval) {
1623         case 0:
1624             /* we have valid st_gen: update result mask */
1625             v9stat_dotl.st_result_mask |= P9_STATS_GEN;
1626             break;
1627         case -EINTR:
1628             /* request cancelled, e.g. by Tflush */
1629             goto out;
1630         default:
1631             /* failed to get st_gen: not fatal, ignore */
1632             break;
1633         }
1634     }
1635     retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl);
1636     if (retval < 0) {
1637         goto out;
1638     }
1639     retval += offset;
1640     trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask,
1641                               v9stat_dotl.st_mode, v9stat_dotl.st_uid,
1642                               v9stat_dotl.st_gid);
1643 out:
1644     put_fid(pdu, fidp);
1645 out_nofid:
1646     pdu_complete(pdu, retval);
1647 }
1648 
1649 /* Attribute flags */
1650 #define P9_ATTR_MODE       (1 << 0)
1651 #define P9_ATTR_UID        (1 << 1)
1652 #define P9_ATTR_GID        (1 << 2)
1653 #define P9_ATTR_SIZE       (1 << 3)
1654 #define P9_ATTR_ATIME      (1 << 4)
1655 #define P9_ATTR_MTIME      (1 << 5)
1656 #define P9_ATTR_CTIME      (1 << 6)
1657 #define P9_ATTR_ATIME_SET  (1 << 7)
1658 #define P9_ATTR_MTIME_SET  (1 << 8)
1659 
1660 #define P9_ATTR_MASK    127
1661 
1662 static void coroutine_fn v9fs_setattr(void *opaque)
1663 {
1664     int err = 0;
1665     int32_t fid;
1666     V9fsFidState *fidp;
1667     size_t offset = 7;
1668     V9fsIattr v9iattr;
1669     V9fsPDU *pdu = opaque;
1670 
1671     err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr);
1672     if (err < 0) {
1673         goto out_nofid;
1674     }
1675 
1676     trace_v9fs_setattr(pdu->tag, pdu->id, fid,
1677                        v9iattr.valid, v9iattr.mode, v9iattr.uid, v9iattr.gid,
1678                        v9iattr.size, v9iattr.atime_sec, v9iattr.mtime_sec);
1679 
1680     fidp = get_fid(pdu, fid);
1681     if (fidp == NULL) {
1682         err = -EINVAL;
1683         goto out_nofid;
1684     }
1685     if (v9iattr.valid & P9_ATTR_MODE) {
1686         err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode);
1687         if (err < 0) {
1688             goto out;
1689         }
1690     }
1691     if (v9iattr.valid & (P9_ATTR_ATIME | P9_ATTR_MTIME)) {
1692         struct timespec times[2];
1693         if (v9iattr.valid & P9_ATTR_ATIME) {
1694             if (v9iattr.valid & P9_ATTR_ATIME_SET) {
1695                 times[0].tv_sec = v9iattr.atime_sec;
1696                 times[0].tv_nsec = v9iattr.atime_nsec;
1697             } else {
1698                 times[0].tv_nsec = UTIME_NOW;
1699             }
1700         } else {
1701             times[0].tv_nsec = UTIME_OMIT;
1702         }
1703         if (v9iattr.valid & P9_ATTR_MTIME) {
1704             if (v9iattr.valid & P9_ATTR_MTIME_SET) {
1705                 times[1].tv_sec = v9iattr.mtime_sec;
1706                 times[1].tv_nsec = v9iattr.mtime_nsec;
1707             } else {
1708                 times[1].tv_nsec = UTIME_NOW;
1709             }
1710         } else {
1711             times[1].tv_nsec = UTIME_OMIT;
1712         }
1713         err = v9fs_co_utimensat(pdu, &fidp->path, times);
1714         if (err < 0) {
1715             goto out;
1716         }
1717     }
1718     /*
1719      * If the only valid entry in iattr is ctime we can call
1720      * chown(-1,-1) to update the ctime of the file
1721      */
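    /*
     * For example: valid == P9_ATTR_CTIME alone makes
     * ((valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME) == 0, so the branch below
     * issues v9fs_co_chown(..., -1, -1), which merely updates ctime.
     */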
1722     if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) ||
1723         ((v9iattr.valid & P9_ATTR_CTIME)
1724          && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) {
1725         if (!(v9iattr.valid & P9_ATTR_UID)) {
1726             v9iattr.uid = -1;
1727         }
1728         if (!(v9iattr.valid & P9_ATTR_GID)) {
1729             v9iattr.gid = -1;
1730         }
1731         err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid,
1732                             v9iattr.gid);
1733         if (err < 0) {
1734             goto out;
1735         }
1736     }
1737     if (v9iattr.valid & (P9_ATTR_SIZE)) {
1738         err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
1739         if (err < 0) {
1740             goto out;
1741         }
1742     }
1743     err = offset;
1744     trace_v9fs_setattr_return(pdu->tag, pdu->id);
1745 out:
1746     put_fid(pdu, fidp);
1747 out_nofid:
1748     pdu_complete(pdu, err);
1749 }
1750 
1751 static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids)
1752 {
1753     int i;
1754     ssize_t err;
1755     size_t offset = 7;
1756 
1757     err = pdu_marshal(pdu, offset, "w", nwnames);
1758     if (err < 0) {
1759         return err;
1760     }
1761     offset += err;
1762     for (i = 0; i < nwnames; i++) {
1763         err = pdu_marshal(pdu, offset, "Q", &qids[i]);
1764         if (err < 0) {
1765             return err;
1766         }
1767         offset += err;
1768     }
1769     return offset;
1770 }
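
/*
 * Worked example: for nwnames == 2 the payload is the 2-byte nwqid count
 * plus two 13-byte qids (type[1] version[4] path[8]), so the returned
 * offset is 7 + 2 + 2 * 13 == 35.
 */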
1771 
1772 static bool name_is_illegal(const char *name)
1773 {
1774     return !*name || strchr(name, '/') != NULL;
1775 }
1776 
1777 static bool same_stat_id(const struct stat *a, const struct stat *b)
1778 {
1779     return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
1780 }
1781 
1782 static void coroutine_fn v9fs_walk(void *opaque)
1783 {
1784     int name_idx, nwalked;
1785     g_autofree V9fsQID *qids = NULL;
1786     int i, err = 0, any_err = 0;
1787     V9fsPath dpath, path;
1788     P9ARRAY_REF(V9fsPath) pathes = NULL;
1789     uint16_t nwnames;
1790     struct stat stbuf, fidst;
1791     g_autofree struct stat *stbufs = NULL;
1792     size_t offset = 7;
1793     int32_t fid, newfid;
1794     P9ARRAY_REF(V9fsString) wnames = NULL;
1795     V9fsFidState *fidp;
1796     V9fsFidState *newfidp = NULL;
1797     V9fsPDU *pdu = opaque;
1798     V9fsState *s = pdu->s;
1799     V9fsQID qid;
1800 
1801     err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames);
1802     if (err < 0) {
1803         pdu_complete(pdu, err);
1804         return;
1805     }
1806     offset += err;
1807 
1808     trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
1809 
1810     if (nwnames > P9_MAXWELEM) {
1811         err = -EINVAL;
1812         goto out_nofid;
1813     }
1814     if (nwnames) {
1815         P9ARRAY_NEW(V9fsString, wnames, nwnames);
1816         qids   = g_new0(V9fsQID, nwnames);
1817         stbufs = g_new0(struct stat, nwnames);
1818         P9ARRAY_NEW(V9fsPath, pathes, nwnames);
1819         for (i = 0; i < nwnames; i++) {
1820             err = pdu_unmarshal(pdu, offset, "s", &wnames[i]);
1821             if (err < 0) {
1822                 goto out_nofid;
1823             }
1824             if (name_is_illegal(wnames[i].data)) {
1825                 err = -ENOENT;
1826                 goto out_nofid;
1827             }
1828             offset += err;
1829         }
1830     }
1831     fidp = get_fid(pdu, fid);
1832     if (fidp == NULL) {
1833         err = -ENOENT;
1834         goto out_nofid;
1835     }
1836 
1837     v9fs_path_init(&dpath);
1838     v9fs_path_init(&path);
1839     /*
1840      * Both dpath and path initially point to fidp.
1841      * Needed to handle request with nwnames == 0
1842      */
1843     v9fs_path_copy(&dpath, &fidp->path);
1844     v9fs_path_copy(&path, &fidp->path);
1845 
1846     /*
1847      * To keep latency (i.e. overall execution time for processing this
1848      * Twalk client request) as small as possible, run all the required fs
1849      * driver code altogether inside the following block.
1850      */
1851     v9fs_co_run_in_worker({
1852         nwalked = 0;
1853         if (v9fs_request_cancelled(pdu)) {
1854             any_err |= err = -EINTR;
1855             break;
1856         }
1857         err = s->ops->lstat(&s->ctx, &dpath, &fidst);
1858         if (err < 0) {
1859             any_err |= err = -errno;
1860             break;
1861         }
1862         stbuf = fidst;
1863         for (; nwalked < nwnames; nwalked++) {
1864             if (v9fs_request_cancelled(pdu)) {
1865                 any_err |= err = -EINTR;
1866                 break;
1867             }
1868             if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1869                 strcmp("..", wnames[nwalked].data))
1870             {
1871                 err = s->ops->name_to_path(&s->ctx, &dpath,
1872                                            wnames[nwalked].data,
1873                                            &pathes[nwalked]);
1874                 if (err < 0) {
1875                     any_err |= err = -errno;
1876                     break;
1877                 }
1878                 if (v9fs_request_cancelled(pdu)) {
1879                     any_err |= err = -EINTR;
1880                     break;
1881                 }
1882                 err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf);
1883                 if (err < 0) {
1884                     any_err |= err = -errno;
1885                     break;
1886                 }
1887                 stbufs[nwalked] = stbuf;
1888                 v9fs_path_copy(&dpath, &pathes[nwalked]);
1889             }
1890         }
1891     });
1892     /*
1893      * Handle all the rest of this Twalk request on main thread ...
1894      *
1895      * NOTE: -EINTR is an exception where we deviate from the protocol spec
1896      * and simply send an Rlerror (or Rerror) response instead of bothering
1897      * to assemble a (truncated) Rwalk response; -EINTR is always the result
1898      * of a Tflush request, so the client would no longer wait for a response
1899      * in this case anyway.
1900      */
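    /*
     * Example of the partial-success path below: for a Twalk of three names
     * where the fs driver fails on the second one, nwalked ends up as 1 and
     * an Rwalk carrying a single qid is sent, telling the client how far the
     * walk got; the fid is left unaffected (see the comment further below).
     */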
1901     if ((err < 0 && !nwalked) || err == -EINTR) {
1902         goto out;
1903     }
1904 
1905     any_err |= err = stat_to_qid(pdu, &fidst, &qid);
1906     if (err < 0 && !nwalked) {
1907         goto out;
1908     }
1909     stbuf = fidst;
1910 
1911     /* reset dpath and path */
1912     v9fs_path_copy(&dpath, &fidp->path);
1913     v9fs_path_copy(&path, &fidp->path);
1914 
1915     for (name_idx = 0; name_idx < nwalked; name_idx++) {
1916         if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1917             strcmp("..", wnames[name_idx].data))
1918         {
1919             stbuf = stbufs[name_idx];
1920             any_err |= err = stat_to_qid(pdu, &stbuf, &qid);
1921             if (err < 0) {
1922                 break;
1923             }
1924             v9fs_path_copy(&path, &pathes[name_idx]);
1925             v9fs_path_copy(&dpath, &path);
1926         }
1927         memcpy(&qids[name_idx], &qid, sizeof(qid));
1928     }
1929     if (any_err < 0) {
1930         if (!name_idx) {
1931             /* don't send any QIDs, send Rlerror instead */
1932             goto out;
1933         } else {
1934             /* send QIDs (not Rlerror), but fid MUST remain unaffected */
1935             goto send_qids;
1936         }
1937     }
1938     if (fid == newfid) {
1939         if (fidp->fid_type != P9_FID_NONE) {
1940             err = -EINVAL;
1941             goto out;
1942         }
1943         v9fs_path_write_lock(s);
1944         v9fs_path_copy(&fidp->path, &path);
1945         v9fs_path_unlock(s);
1946     } else {
1947         newfidp = alloc_fid(s, newfid);
1948         if (newfidp == NULL) {
1949             err = -EINVAL;
1950             goto out;
1951         }
1952         newfidp->uid = fidp->uid;
1953         v9fs_path_copy(&newfidp->path, &path);
1954     }
1955 send_qids:
1956     err = v9fs_walk_marshal(pdu, name_idx, qids);
1957     trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids);
1958 out:
1959     put_fid(pdu, fidp);
1960     if (newfidp) {
1961         put_fid(pdu, newfidp);
1962     }
1963     v9fs_path_free(&dpath);
1964     v9fs_path_free(&path);
1965 out_nofid:
1966     pdu_complete(pdu, err);
1967 }
1968 
1969 static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
1970 {
1971     struct statfs stbuf;
1972     int err = v9fs_co_statfs(pdu, path, &stbuf);
1973 
1974     return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0);
1975 }
1976 
1977 static void coroutine_fn v9fs_open(void *opaque)
1978 {
1979     int flags;
1980     int32_t fid;
1981     int32_t mode;
1982     V9fsQID qid;
1983     int iounit = 0;
1984     ssize_t err = 0;
1985     size_t offset = 7;
1986     struct stat stbuf;
1987     V9fsFidState *fidp;
1988     V9fsPDU *pdu = opaque;
1989     V9fsState *s = pdu->s;
1990 
1991     if (s->proto_version == V9FS_PROTO_2000L) {
1992         err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
1993     } else {
1994         uint8_t modebyte;
1995         err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte);
1996         mode = modebyte;
1997     }
1998     if (err < 0) {
1999         goto out_nofid;
2000     }
2001     trace_v9fs_open(pdu->tag, pdu->id, fid, mode);
2002 
2003     fidp = get_fid(pdu, fid);
2004     if (fidp == NULL) {
2005         err = -ENOENT;
2006         goto out_nofid;
2007     }
2008     if (fidp->fid_type != P9_FID_NONE) {
2009         err = -EINVAL;
2010         goto out;
2011     }
2012 
2013     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2014     if (err < 0) {
2015         goto out;
2016     }
2017     err = stat_to_qid(pdu, &stbuf, &qid);
2018     if (err < 0) {
2019         goto out;
2020     }
2021     if (S_ISDIR(stbuf.st_mode)) {
2022         err = v9fs_co_opendir(pdu, fidp);
2023         if (err < 0) {
2024             goto out;
2025         }
2026         fidp->fid_type = P9_FID_DIR;
2027         err = pdu_marshal(pdu, offset, "Qd", &qid, 0);
2028         if (err < 0) {
2029             goto out;
2030         }
2031         err += offset;
2032     } else {
2033         if (s->proto_version == V9FS_PROTO_2000L) {
2034             flags = get_dotl_openflags(s, mode);
2035         } else {
2036             flags = omode_to_uflags(mode);
2037         }
2038         if (is_ro_export(&s->ctx)) {
2039             if (mode & O_WRONLY || mode & O_RDWR ||
2040                 mode & O_APPEND || mode & O_TRUNC) {
2041                 err = -EROFS;
2042                 goto out;
2043             }
2044         }
2045         err = v9fs_co_open(pdu, fidp, flags);
2046         if (err < 0) {
2047             goto out;
2048         }
2049         fidp->fid_type = P9_FID_FILE;
2050         fidp->open_flags = flags;
2051         if (flags & O_EXCL) {
2052             /*
2053              * We let the host file system do the O_EXCL check;
2054              * we should not reclaim such an fd.
2055              */
2056             fidp->flags |= FID_NON_RECLAIMABLE;
2057         }
2058         iounit = get_iounit(pdu, &fidp->path);
2059         err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2060         if (err < 0) {
2061             goto out;
2062         }
2063         err += offset;
2064     }
2065     trace_v9fs_open_return(pdu->tag, pdu->id,
2066                            qid.type, qid.version, qid.path, iounit);
2067 out:
2068     put_fid(pdu, fidp);
2069 out_nofid:
2070     pdu_complete(pdu, err);
2071 }
2072 
2073 static void coroutine_fn v9fs_lcreate(void *opaque)
2074 {
2075     int32_t dfid, flags, mode;
2076     gid_t gid;
2077     ssize_t err = 0;
2078     ssize_t offset = 7;
2079     V9fsString name;
2080     V9fsFidState *fidp;
2081     struct stat stbuf;
2082     V9fsQID qid;
2083     int32_t iounit;
2084     V9fsPDU *pdu = opaque;
2085 
2086     v9fs_string_init(&name);
2087     err = pdu_unmarshal(pdu, offset, "dsddd", &dfid,
2088                         &name, &flags, &mode, &gid);
2089     if (err < 0) {
2090         goto out_nofid;
2091     }
2092     trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
2093 
2094     if (name_is_illegal(name.data)) {
2095         err = -ENOENT;
2096         goto out_nofid;
2097     }
2098 
2099     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2100         err = -EEXIST;
2101         goto out_nofid;
2102     }
2103 
2104     fidp = get_fid(pdu, dfid);
2105     if (fidp == NULL) {
2106         err = -ENOENT;
2107         goto out_nofid;
2108     }
2109     if (fidp->fid_type != P9_FID_NONE) {
2110         err = -EINVAL;
2111         goto out;
2112     }
2113 
2114     flags = get_dotl_openflags(pdu->s, flags);
2115     err = v9fs_co_open2(pdu, fidp, &name, gid,
2116                         flags | O_CREAT, mode, &stbuf);
2117     if (err < 0) {
2118         goto out;
2119     }
2120     fidp->fid_type = P9_FID_FILE;
2121     fidp->open_flags = flags;
2122     if (flags & O_EXCL) {
2123         /*
2124          * We let the host file system do the O_EXCL check;
2125          * we should not reclaim such an fd.
2126          */
2127         fidp->flags |= FID_NON_RECLAIMABLE;
2128     }
2129     iounit = get_iounit(pdu, &fidp->path);
2130     err = stat_to_qid(pdu, &stbuf, &qid);
2131     if (err < 0) {
2132         goto out;
2133     }
2134     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2135     if (err < 0) {
2136         goto out;
2137     }
2138     err += offset;
2139     trace_v9fs_lcreate_return(pdu->tag, pdu->id,
2140                               qid.type, qid.version, qid.path, iounit);
2141 out:
2142     put_fid(pdu, fidp);
2143 out_nofid:
2144     pdu_complete(pdu, err);
2145     v9fs_string_free(&name);
2146 }
2147 
2148 static void coroutine_fn v9fs_fsync(void *opaque)
2149 {
2150     int err;
2151     int32_t fid;
2152     int datasync;
2153     size_t offset = 7;
2154     V9fsFidState *fidp;
2155     V9fsPDU *pdu = opaque;
2156 
2157     err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
2158     if (err < 0) {
2159         goto out_nofid;
2160     }
2161     trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync);
2162 
2163     fidp = get_fid(pdu, fid);
2164     if (fidp == NULL) {
2165         err = -ENOENT;
2166         goto out_nofid;
2167     }
2168     err = v9fs_co_fsync(pdu, fidp, datasync);
2169     if (!err) {
2170         err = offset;
2171     }
2172     put_fid(pdu, fidp);
2173 out_nofid:
2174     pdu_complete(pdu, err);
2175 }
2176 
2177 static void coroutine_fn v9fs_clunk(void *opaque)
2178 {
2179     int err;
2180     int32_t fid;
2181     size_t offset = 7;
2182     V9fsFidState *fidp;
2183     V9fsPDU *pdu = opaque;
2184     V9fsState *s = pdu->s;
2185 
2186     err = pdu_unmarshal(pdu, offset, "d", &fid);
2187     if (err < 0) {
2188         goto out_nofid;
2189     }
2190     trace_v9fs_clunk(pdu->tag, pdu->id, fid);
2191 
2192     fidp = clunk_fid(s, fid);
2193     if (fidp == NULL) {
2194         err = -ENOENT;
2195         goto out_nofid;
2196     }
2197     /*
2198      * Bump the ref so that put_fid will
2199      * free the fid.
2200      */
2201     fidp->ref++;
2202     err = put_fid(pdu, fidp);
2203     if (!err) {
2204         err = offset;
2205     }
2206 out_nofid:
2207     pdu_complete(pdu, err);
2208 }
2209 
2210 /*
2211  * Create a QEMUIOVector for a sub-region of PDU iovecs
2212  *
2213  * @qiov:       uninitialized QEMUIOVector
 * @pdu:        PDU whose transport iovecs back the new QEMUIOVector
2214  * @skip:       number of bytes to skip from beginning of PDU
2215  * @size:       number of bytes to include
2216  * @is_write:   true - write, false - read
2217  *
2218  * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
2219  * with qemu_iovec_destroy().
2220  */
2221 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
2222                                     size_t skip, size_t size,
2223                                     bool is_write)
2224 {
2225     QEMUIOVector elem;
2226     struct iovec *iov;
2227     unsigned int niov;
2228 
2229     if (is_write) {
2230         pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
2231     } else {
2232         pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
2233     }
2234 
2235     qemu_iovec_init_external(&elem, iov, niov);
2236     qemu_iovec_init(qiov, niov);
2237     qemu_iovec_concat(qiov, &elem, skip, size);
2238 }
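
/*
 * Typical read-path usage (sketch; see v9fs_read() below): skip the 4-byte
 * count field that precedes the payload, hand the sub-region to the fs
 * driver, then release the heap-allocated iovecs:
 *
 *     QEMUIOVector qiov_full;
 *     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
 *     ...
 *     qemu_iovec_destroy(&qiov_full);
 */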
2239 
2240 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2241                            uint64_t off, uint32_t max_count)
2242 {
2243     ssize_t err;
2244     size_t offset = 7;
2245     uint64_t read_count;
2246     QEMUIOVector qiov_full;
2247 
2248     if (fidp->fs.xattr.len < off) {
2249         read_count = 0;
2250     } else {
2251         read_count = fidp->fs.xattr.len - off;
2252     }
2253     if (read_count > max_count) {
2254         read_count = max_count;
2255     }
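    /*
     * Example with hypothetical numbers: xattr.len == 10, off == 4 and
     * max_count == 100 yield read_count == min(10 - 4, 100) == 6; an off
     * past the end of the xattr value yields read_count == 0.
     */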
2256     err = pdu_marshal(pdu, offset, "d", read_count);
2257     if (err < 0) {
2258         return err;
2259     }
2260     offset += err;
2261 
2262     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
2263     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
2264                     ((char *)fidp->fs.xattr.value) + off,
2265                     read_count);
2266     qemu_iovec_destroy(&qiov_full);
2267     if (err < 0) {
2268         return err;
2269     }
2270     offset += err;
2271     return offset;
2272 }
2273 
2274 static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
2275                                                   V9fsFidState *fidp,
2276                                                   uint32_t max_count)
2277 {
2278     V9fsPath path;
2279     V9fsStat v9stat;
2280     int len, err = 0;
2281     int32_t count = 0;
2282     struct stat stbuf;
2283     off_t saved_dir_pos;
2284     struct dirent *dent;
2285 
2286     /* save the directory position */
2287     saved_dir_pos = v9fs_co_telldir(pdu, fidp);
2288     if (saved_dir_pos < 0) {
2289         return saved_dir_pos;
2290     }
2291 
2292     while (1) {
2293         v9fs_path_init(&path);
2294 
2295         v9fs_readdir_lock(&fidp->fs.dir);
2296 
2297         err = v9fs_co_readdir(pdu, fidp, &dent);
2298         if (err || !dent) {
2299             break;
2300         }
2301         err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
2302         if (err < 0) {
2303             break;
2304         }
2305         err = v9fs_co_lstat(pdu, &path, &stbuf);
2306         if (err < 0) {
2307             break;
2308         }
2309         err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat);
2310         if (err < 0) {
2311             break;
2312         }
2313         if ((count + v9stat.size + 2) > max_count) {
2314             v9fs_readdir_unlock(&fidp->fs.dir);
2315 
2316             /* Ran out of buffer. Set dir back to old position and return */
2317             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2318             v9fs_stat_free(&v9stat);
2319             v9fs_path_free(&path);
2320             return count;
2321         }
2322 
2323         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2324         len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
2325 
2326         v9fs_readdir_unlock(&fidp->fs.dir);
2327 
2328         if (len < 0) {
2329             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2330             v9fs_stat_free(&v9stat);
2331             v9fs_path_free(&path);
2332             return len;
2333         }
2334         count += len;
2335         v9fs_stat_free(&v9stat);
2336         v9fs_path_free(&path);
2337         saved_dir_pos = qemu_dirent_off(dent);
2338     }
2339 
2340     v9fs_readdir_unlock(&fidp->fs.dir);
2341 
2342     v9fs_path_free(&path);
2343     if (err < 0) {
2344         return err;
2345     }
2346     return count;
2347 }
2348 
2349 static void coroutine_fn v9fs_read(void *opaque)
2350 {
2351     int32_t fid;
2352     uint64_t off;
2353     ssize_t err = 0;
2354     int32_t count = 0;
2355     size_t offset = 7;
2356     uint32_t max_count;
2357     V9fsFidState *fidp;
2358     V9fsPDU *pdu = opaque;
2359     V9fsState *s = pdu->s;
2360 
2361     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
2362     if (err < 0) {
2363         goto out_nofid;
2364     }
2365     trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
2366 
2367     fidp = get_fid(pdu, fid);
2368     if (fidp == NULL) {
2369         err = -EINVAL;
2370         goto out_nofid;
2371     }
2372     if (fidp->fid_type == P9_FID_DIR) {
2373         if (s->proto_version != V9FS_PROTO_2000U) {
2374             warn_report_once(
2375                 "9p: bad client: T_read request on directory only expected "
2376                 "with 9P2000.u protocol version"
2377             );
2378             err = -EOPNOTSUPP;
2379             goto out;
2380         }
2381         if (off == 0) {
2382             v9fs_co_rewinddir(pdu, fidp);
2383         }
2384         count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
2385         if (count < 0) {
2386             err = count;
2387             goto out;
2388         }
2389         err = pdu_marshal(pdu, offset, "d", count);
2390         if (err < 0) {
2391             goto out;
2392         }
2393         err += offset + count;
2394     } else if (fidp->fid_type == P9_FID_FILE) {
2395         QEMUIOVector qiov_full;
2396         QEMUIOVector qiov;
2397         int32_t len;
2398 
2399         v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
2400         qemu_iovec_init(&qiov, qiov_full.niov);
2401         do {
2402             qemu_iovec_reset(&qiov);
2403             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
2404             if (0) {
2405                 print_sg(qiov.iov, qiov.niov);
2406             }
2407             /* Loop in case of EINTR */
2408             do {
2409                 len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
2410                 if (len >= 0) {
2411                     off   += len;
2412                     count += len;
2413                 }
2414             } while (len == -EINTR && !pdu->cancelled);
2415             if (len < 0) {
2416                 /* I/O error: return the error */
2417                 err = len;
2418                 goto out_free_iovec;
2419             }
2420         } while (count < max_count && len > 0);
2421         err = pdu_marshal(pdu, offset, "d", count);
2422         if (err < 0) {
2423             goto out_free_iovec;
2424         }
2425         err += offset + count;
2426 out_free_iovec:
2427         qemu_iovec_destroy(&qiov);
2428         qemu_iovec_destroy(&qiov_full);
2429     } else if (fidp->fid_type == P9_FID_XATTR) {
2430         err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
2431     } else {
2432         err = -EINVAL;
2433     }
2434     trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
2435 out:
2436     put_fid(pdu, fidp);
2437 out_nofid:
2438     pdu_complete(pdu, err);
2439 }
2440 
2441 /**
2442  * v9fs_readdir_response_size() - Returns size required in Rreaddir response
2443  * for the passed dirent @name.
2444  *
2445  * @name: directory entry's name (i.e. file name, directory name)
2446  * Return: required size in bytes
2447  */
2448 size_t v9fs_readdir_response_size(V9fsString *name)
2449 {
2450     /*
2451      * Size of each dirent on the wire: size of qid (13) + size of offset (8)
2452      * + size of type (1) + size of name.size (2) + strlen(name.data)
2453      */
2454     return 24 + v9fs_string_size(name);
2455 }
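
/*
 * Worked example: a 5-byte name such as "file1" needs
 * 13 (qid) + 8 (offset) + 1 (type) + 2 (name.size) + 5 == 29 bytes,
 * which is 24 + v9fs_string_size(name).
 */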
2456 
2457 static void v9fs_free_dirents(struct V9fsDirEnt *e)
2458 {
2459     struct V9fsDirEnt *next = NULL;
2460 
2461     for (; e; e = next) {
2462         next = e->next;
2463         g_free(e->dent);
2464         g_free(e->st);
2465         g_free(e);
2466     }
2467 }
2468 
2469 static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
2470                                         off_t offset, int32_t max_count)
2471 {
2472     size_t size;
2473     V9fsQID qid;
2474     V9fsString name;
2475     int len, err = 0;
2476     int32_t count = 0;
2477     off_t off;
2478     struct dirent *dent;
2479     struct stat *st;
2480     struct V9fsDirEnt *entries = NULL;
2481 
2482     /*
2483      * inode remapping requires the device id, which in turn might be
2484      * different for different directory entries, so if inode remapping is
2485      * enabled we have to make a full stat for each directory entry
2486      */
2487     const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES;
2488 
2489     /*
2490      * Fetch all required directory entries at once on a background IO
2491      * thread from the fs driver. We don't want to do that for each entry
2492      * individually, because hopping between threads (this main IO thread
2493      * and the background IO driver thread) would add up to large latencies.
2494      */
2495     count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
2496                                  dostat);
2497     if (count < 0) {
2498         err = count;
2499         count = 0;
2500         goto out;
2501     }
2502     count = 0;
2503 
2504     for (struct V9fsDirEnt *e = entries; e; e = e->next) {
2505         dent = e->dent;
2506 
2507         if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
2508             st = e->st;
2509             /* e->st should never be NULL, but just to be sure */
2510             if (!st) {
2511                 err = -1;
2512                 break;
2513             }
2514 
2515             /* remap inode */
2516             err = stat_to_qid(pdu, st, &qid);
2517             if (err < 0) {
2518                 break;
2519             }
2520         } else {
2521             /*
2522              * Fill in just the path field of the qid, because the client
2523              * uses only that. Filling the entire qid structure would require
2524              * a stat of each dirent found, which is expensive; for that
2525              * reason we don't call stat_to_qid() here. The only drawback
2526              * is that stat_to_qid()'s multi-device export detection is not
2527              * performed, so no such error is reported to the user here. But
2528              * the user would get that error anyway when accessing those
2529              * files/dirs in other ways.
2530              */
2531             size = MIN(sizeof(dent->d_ino), sizeof(qid.path));
2532             memcpy(&qid.path, &dent->d_ino, size);
2533             /* Fill the other fields with dummy values */
2534             qid.type = 0;
2535             qid.version = 0;
2536         }
2537 
2538         off = qemu_dirent_off(dent);
2539         v9fs_string_init(&name);
2540         v9fs_string_sprintf(&name, "%s", dent->d_name);
2541 
2542         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2543         len = pdu_marshal(pdu, 11 + count, "Qqbs",
2544                           &qid, off,
2545                           dent->d_type, &name);
2546 
2547         v9fs_string_free(&name);
2548 
2549         if (len < 0) {
2550             err = len;
2551             break;
2552         }
2553 
2554         count += len;
2555     }
2556 
2557 out:
2558     v9fs_free_dirents(entries);
2559     if (err < 0) {
2560         return err;
2561     }
2562     return count;
2563 }
2564 
2565 static void coroutine_fn v9fs_readdir(void *opaque)
2566 {
2567     int32_t fid;
2568     V9fsFidState *fidp;
2569     ssize_t retval = 0;
2570     size_t offset = 7;
2571     uint64_t initial_offset;
2572     int32_t count;
2573     uint32_t max_count;
2574     V9fsPDU *pdu = opaque;
2575     V9fsState *s = pdu->s;
2576 
2577     retval = pdu_unmarshal(pdu, offset, "dqd", &fid,
2578                            &initial_offset, &max_count);
2579     if (retval < 0) {
2580         goto out_nofid;
2581     }
2582     trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count);
2583 
2584     /* Enough space for an Rreaddir header: size[4] type[1] tag[2] count[4] */
2585     if (max_count > s->msize - 11) {
2586         max_count = s->msize - 11;
2587         warn_report_once(
2588             "9p: bad client: T_readdir with count > msize - 11"
2589         );
2590     }
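    /*
     * For example, with a (hypothetical) msize of 8192 bytes the 11-byte
     * header leaves max_count capped at 8192 - 11 == 8181.
     */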
2591 
2592     fidp = get_fid(pdu, fid);
2593     if (fidp == NULL) {
2594         retval = -EINVAL;
2595         goto out_nofid;
2596     }
2597     if (!fidp->fs.dir.stream) {
2598         retval = -EINVAL;
2599         goto out;
2600     }
2601     if (s->proto_version != V9FS_PROTO_2000L) {
2602         warn_report_once(
2603             "9p: bad client: T_readdir request only expected with 9P2000.L "
2604             "protocol version"
2605         );
2606         retval = -EOPNOTSUPP;
2607         goto out;
2608     }
2609     count = v9fs_do_readdir(pdu, fidp, (off_t) initial_offset, max_count);
2610     if (count < 0) {
2611         retval = count;
2612         goto out;
2613     }
2614     retval = pdu_marshal(pdu, offset, "d", count);
2615     if (retval < 0) {
2616         goto out;
2617     }
2618     retval += count + offset;
2619     trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval);
2620 out:
2621     put_fid(pdu, fidp);
2622 out_nofid:
2623     pdu_complete(pdu, retval);
2624 }
2625 
2626 static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2627                             uint64_t off, uint32_t count,
2628                             struct iovec *sg, int cnt)
2629 {
2630     int i, to_copy;
2631     ssize_t err = 0;
2632     uint64_t write_count;
2633     size_t offset = 7;
2634 
2635 
2636     if (fidp->fs.xattr.len < off) {
2637         return -ENOSPC;
2638     }
2639     write_count = fidp->fs.xattr.len - off;
2640     if (write_count > count) {
2641         write_count = count;
2642     }
2643     err = pdu_marshal(pdu, offset, "d", write_count);
2644     if (err < 0) {
2645         return err;
2646     }
2647     err += offset;
2648     fidp->fs.xattr.copied_len += write_count;
2649     /*
2650      * Now copy the content from sg list
2651      */
2652     for (i = 0; i < cnt; i++) {
2653         if (write_count > sg[i].iov_len) {
2654             to_copy = sg[i].iov_len;
2655         } else {
2656             to_copy = write_count;
2657         }
2658         memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy);
2659         /* advance the destination offset into the xattr buffer */
2660         off += to_copy;
2661         write_count -= to_copy;
2662     }
2663 
2664     return err;
2665 }
2666 
2667 static void coroutine_fn v9fs_write(void *opaque)
2668 {
2669     ssize_t err;
2670     int32_t fid;
2671     uint64_t off;
2672     uint32_t count;
2673     int32_t len = 0;
2674     int32_t total = 0;
2675     size_t offset = 7;
2676     V9fsFidState *fidp;
2677     V9fsPDU *pdu = opaque;
2678     V9fsState *s = pdu->s;
2679     QEMUIOVector qiov_full;
2680     QEMUIOVector qiov;
2681 
2682     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count);
2683     if (err < 0) {
2684         pdu_complete(pdu, err);
2685         return;
2686     }
2687     offset += err;
2688     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
2689     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
2690 
2691     fidp = get_fid(pdu, fid);
2692     if (fidp == NULL) {
2693         err = -EINVAL;
2694         goto out_nofid;
2695     }
2696     if (fidp->fid_type == P9_FID_FILE) {
2697         if (fidp->fs.fd == -1) {
2698             err = -EINVAL;
2699             goto out;
2700         }
2701     } else if (fidp->fid_type == P9_FID_XATTR) {
2702         /*
2703          * setxattr operation
2704          */
2705         err = v9fs_xattr_write(s, pdu, fidp, off, count,
2706                                qiov_full.iov, qiov_full.niov);
2707         goto out;
2708     } else {
2709         err = -EINVAL;
2710         goto out;
2711     }
2712     qemu_iovec_init(&qiov, qiov_full.niov);
2713     do {
2714         qemu_iovec_reset(&qiov);
2715         qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
2716         if (0) {
2717             print_sg(qiov.iov, qiov.niov);
2718         }
2719         /* Loop in case of EINTR */
2720         do {
2721             len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off);
2722             if (len >= 0) {
2723                 off   += len;
2724                 total += len;
2725             }
2726         } while (len == -EINTR && !pdu->cancelled);
2727         if (len < 0) {
2728             /* I/O error: return the error */
2729             err = len;
2730             goto out_qiov;
2731         }
2732     } while (total < count && len > 0);
2733 
2734     offset = 7;
2735     err = pdu_marshal(pdu, offset, "d", total);
2736     if (err < 0) {
2737         goto out_qiov;
2738     }
2739     err += offset;
2740     trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
2741 out_qiov:
2742     qemu_iovec_destroy(&qiov);
2743 out:
2744     put_fid(pdu, fidp);
2745 out_nofid:
2746     qemu_iovec_destroy(&qiov_full);
2747     pdu_complete(pdu, err);
2748 }
2749 
2750 static void coroutine_fn v9fs_create(void *opaque)
2751 {
2752     int32_t fid;
2753     int err = 0;
2754     size_t offset = 7;
2755     V9fsFidState *fidp;
2756     V9fsQID qid;
2757     int32_t perm;
2758     int8_t mode;
2759     V9fsPath path;
2760     struct stat stbuf;
2761     V9fsString name;
2762     V9fsString extension;
2763     int iounit;
2764     V9fsPDU *pdu = opaque;
2765     V9fsState *s = pdu->s;
2766 
2767     v9fs_path_init(&path);
2768     v9fs_string_init(&name);
2769     v9fs_string_init(&extension);
2770     err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name,
2771                         &perm, &mode, &extension);
2772     if (err < 0) {
2773         goto out_nofid;
2774     }
2775     trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode);
2776 
2777     if (name_is_illegal(name.data)) {
2778         err = -ENOENT;
2779         goto out_nofid;
2780     }
2781 
2782     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2783         err = -EEXIST;
2784         goto out_nofid;
2785     }
2786 
2787     fidp = get_fid(pdu, fid);
2788     if (fidp == NULL) {
2789         err = -EINVAL;
2790         goto out_nofid;
2791     }
2792     if (fidp->fid_type != P9_FID_NONE) {
2793         err = -EINVAL;
2794         goto out;
2795     }
2796     if (perm & P9_STAT_MODE_DIR) {
2797         err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777,
2798                             fidp->uid, -1, &stbuf);
2799         if (err < 0) {
2800             goto out;
2801         }
2802         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2803         if (err < 0) {
2804             goto out;
2805         }
2806         v9fs_path_write_lock(s);
2807         v9fs_path_copy(&fidp->path, &path);
2808         v9fs_path_unlock(s);
2809         err = v9fs_co_opendir(pdu, fidp);
2810         if (err < 0) {
2811             goto out;
2812         }
2813         fidp->fid_type = P9_FID_DIR;
2814     } else if (perm & P9_STAT_MODE_SYMLINK) {
2815         err = v9fs_co_symlink(pdu, fidp, &name,
2816                               extension.data, -1, &stbuf);
2817         if (err < 0) {
2818             goto out;
2819         }
2820         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2821         if (err < 0) {
2822             goto out;
2823         }
2824         v9fs_path_write_lock(s);
2825         v9fs_path_copy(&fidp->path, &path);
2826         v9fs_path_unlock(s);
2827     } else if (perm & P9_STAT_MODE_LINK) {
2828         int32_t ofid = atoi(extension.data);
2829         V9fsFidState *ofidp = get_fid(pdu, ofid);
2830         if (ofidp == NULL) {
2831             err = -EINVAL;
2832             goto out;
2833         }
2834         err = v9fs_co_link(pdu, ofidp, fidp, &name);
2835         put_fid(pdu, ofidp);
2836         if (err < 0) {
2837             goto out;
2838         }
2839         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2840         if (err < 0) {
2841             fidp->fid_type = P9_FID_NONE;
2842             goto out;
2843         }
2844         v9fs_path_write_lock(s);
2845         v9fs_path_copy(&fidp->path, &path);
2846         v9fs_path_unlock(s);
2847         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2848         if (err < 0) {
2849             fidp->fid_type = P9_FID_NONE;
2850             goto out;
2851         }
2852     } else if (perm & P9_STAT_MODE_DEVICE) {
2853         char ctype;
2854         uint32_t major, minor;
2855         mode_t nmode = 0;
2856 
2857         if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) {
2858             err = -errno;
2859             goto out;
2860         }
2861 
2862         switch (ctype) {
2863         case 'c':
2864             nmode = S_IFCHR;
2865             break;
2866         case 'b':
2867             nmode = S_IFBLK;
2868             break;
2869         default:
2870             err = -EIO;
2871             goto out;
2872         }
2873 
2874         nmode |= perm & 0777;
2875         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2876                             makedev(major, minor), nmode, &stbuf);
2877         if (err < 0) {
2878             goto out;
2879         }
2880         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2881         if (err < 0) {
2882             goto out;
2883         }
2884         v9fs_path_write_lock(s);
2885         v9fs_path_copy(&fidp->path, &path);
2886         v9fs_path_unlock(s);
2887     } else if (perm & P9_STAT_MODE_NAMED_PIPE) {
2888         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2889                             0, S_IFIFO | (perm & 0777), &stbuf);
2890         if (err < 0) {
2891             goto out;
2892         }
2893         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2894         if (err < 0) {
2895             goto out;
2896         }
2897         v9fs_path_write_lock(s);
2898         v9fs_path_copy(&fidp->path, &path);
2899         v9fs_path_unlock(s);
2900     } else if (perm & P9_STAT_MODE_SOCKET) {
2901         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2902                             0, S_IFSOCK | (perm & 0777), &stbuf);
2903         if (err < 0) {
2904             goto out;
2905         }
2906         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2907         if (err < 0) {
2908             goto out;
2909         }
2910         v9fs_path_write_lock(s);
2911         v9fs_path_copy(&fidp->path, &path);
2912         v9fs_path_unlock(s);
2913     } else {
2914         err = v9fs_co_open2(pdu, fidp, &name, -1,
2915                             omode_to_uflags(mode) | O_CREAT, perm, &stbuf);
2916         if (err < 0) {
2917             goto out;
2918         }
2919         fidp->fid_type = P9_FID_FILE;
2920         fidp->open_flags = omode_to_uflags(mode);
2921         if (fidp->open_flags & O_EXCL) {
2922             /*
2923              * We let the host file system do the O_EXCL check;
2924              * we should not reclaim such an fd.
2925              */
2926             fidp->flags |= FID_NON_RECLAIMABLE;
2927         }
2928     }
2929     iounit = get_iounit(pdu, &fidp->path);
2930     err = stat_to_qid(pdu, &stbuf, &qid);
2931     if (err < 0) {
2932         goto out;
2933     }
2934     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2935     if (err < 0) {
2936         goto out;
2937     }
2938     err += offset;
2939     trace_v9fs_create_return(pdu->tag, pdu->id,
2940                              qid.type, qid.version, qid.path, iounit);
2941 out:
2942     put_fid(pdu, fidp);
2943 out_nofid:
2944     pdu_complete(pdu, err);
2945     v9fs_string_free(&name);
2946     v9fs_string_free(&extension);
2947     v9fs_path_free(&path);
2948 }
2949 
2950 static void coroutine_fn v9fs_symlink(void *opaque)
2951 {
2952     V9fsPDU *pdu = opaque;
2953     V9fsString name;
2954     V9fsString symname;
2955     V9fsFidState *dfidp;
2956     V9fsQID qid;
2957     struct stat stbuf;
2958     int32_t dfid;
2959     int err = 0;
2960     gid_t gid;
2961     size_t offset = 7;
2962 
2963     v9fs_string_init(&name);
2964     v9fs_string_init(&symname);
2965     err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid);
2966     if (err < 0) {
2967         goto out_nofid;
2968     }
2969     trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid);
2970 
2971     if (name_is_illegal(name.data)) {
2972         err = -ENOENT;
2973         goto out_nofid;
2974     }
2975 
2976     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2977         err = -EEXIST;
2978         goto out_nofid;
2979     }
2980 
2981     dfidp = get_fid(pdu, dfid);
2982     if (dfidp == NULL) {
2983         err = -EINVAL;
2984         goto out_nofid;
2985     }
2986     err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf);
2987     if (err < 0) {
2988         goto out;
2989     }
2990     err = stat_to_qid(pdu, &stbuf, &qid);
2991     if (err < 0) {
2992         goto out;
2993     }
2994     err = pdu_marshal(pdu, offset, "Q", &qid);
2995     if (err < 0) {
2996         goto out;
2997     }
2998     err += offset;
2999     trace_v9fs_symlink_return(pdu->tag, pdu->id,
3000                               qid.type, qid.version, qid.path);
3001 out:
3002     put_fid(pdu, dfidp);
3003 out_nofid:
3004     pdu_complete(pdu, err);
3005     v9fs_string_free(&name);
3006     v9fs_string_free(&symname);
3007 }
3008 
3009 static void coroutine_fn v9fs_flush(void *opaque)
3010 {
3011     ssize_t err;
3012     int16_t tag;
3013     size_t offset = 7;
3014     V9fsPDU *cancel_pdu = NULL;
3015     V9fsPDU *pdu = opaque;
3016     V9fsState *s = pdu->s;
3017 
3018     err = pdu_unmarshal(pdu, offset, "w", &tag);
3019     if (err < 0) {
3020         pdu_complete(pdu, err);
3021         return;
3022     }
3023     trace_v9fs_flush(pdu->tag, pdu->id, tag);
3024 
3025     if (pdu->tag == tag) {
3026         warn_report("the guest sent a self-referencing 9P flush request");
3027     } else {
3028         QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
3029             if (cancel_pdu->tag == tag) {
3030                 break;
3031             }
3032         }
3033     }
3034     if (cancel_pdu) {
3035         cancel_pdu->cancelled = 1;
3036         /*
3037          * Wait for pdu to complete.
3038          */
3039         qemu_co_queue_wait(&cancel_pdu->complete, NULL);
3040         if (!qemu_co_queue_next(&cancel_pdu->complete)) {
3041             cancel_pdu->cancelled = 0;
3042             pdu_free(cancel_pdu);
3043         }
3044     }
3045     pdu_complete(pdu, 7);
3046 }
3047 
3048 static void coroutine_fn v9fs_link(void *opaque)
3049 {
3050     V9fsPDU *pdu = opaque;
3051     int32_t dfid, oldfid;
3052     V9fsFidState *dfidp, *oldfidp;
3053     V9fsString name;
3054     size_t offset = 7;
3055     int err = 0;
3056 
3057     v9fs_string_init(&name);
3058     err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
3059     if (err < 0) {
3060         goto out_nofid;
3061     }
3062     trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
3063 
3064     if (name_is_illegal(name.data)) {
3065         err = -ENOENT;
3066         goto out_nofid;
3067     }
3068 
3069     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3070         err = -EEXIST;
3071         goto out_nofid;
3072     }
3073 
3074     dfidp = get_fid(pdu, dfid);
3075     if (dfidp == NULL) {
3076         err = -ENOENT;
3077         goto out_nofid;
3078     }
3079 
3080     oldfidp = get_fid(pdu, oldfid);
3081     if (oldfidp == NULL) {
3082         err = -ENOENT;
3083         goto out;
3084     }
3085     err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
3086     if (!err) {
3087         err = offset;
3088     }
3089     put_fid(pdu, oldfidp);
3090 out:
3091     put_fid(pdu, dfidp);
3092 out_nofid:
3093     v9fs_string_free(&name);
3094     pdu_complete(pdu, err);
3095 }
3096 
3097 /* Only works with path name based fid */
3098 static void coroutine_fn v9fs_remove(void *opaque)
3099 {
3100     int32_t fid;
3101     int err = 0;
3102     size_t offset = 7;
3103     V9fsFidState *fidp;
3104     V9fsPDU *pdu = opaque;
3105 
3106     err = pdu_unmarshal(pdu, offset, "d", &fid);
3107     if (err < 0) {
3108         goto out_nofid;
3109     }
3110     trace_v9fs_remove(pdu->tag, pdu->id, fid);
3111 
3112     fidp = get_fid(pdu, fid);
3113     if (fidp == NULL) {
3114         err = -EINVAL;
3115         goto out_nofid;
3116     }
3117     /* if fs driver is not path based, return EOPNOTSUPP */
3118     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3119         err = -EOPNOTSUPP;
3120         goto out_err;
3121     }
3122     /*
3123      * If the file is unlinked, we cannot reopen
3124      * it later, so don't reclaim the fd.
3125      */
3126     err = v9fs_mark_fids_unreclaim(pdu, &fidp->path);
3127     if (err < 0) {
3128         goto out_err;
3129     }
3130     err = v9fs_co_remove(pdu, &fidp->path);
3131     if (!err) {
3132         err = offset;
3133     }
3134 out_err:
3135     /* For TREMOVE we need to clunk the fid even on failed remove */
3136     clunk_fid(pdu->s, fidp->fid);
3137     put_fid(pdu, fidp);
3138 out_nofid:
3139     pdu_complete(pdu, err);
3140 }
3141 
3142 static void coroutine_fn v9fs_unlinkat(void *opaque)
3143 {
3144     int err = 0;
3145     V9fsString name;
3146     int32_t dfid, flags, rflags = 0;
3147     size_t offset = 7;
3148     V9fsPath path;
3149     V9fsFidState *dfidp;
3150     V9fsPDU *pdu = opaque;
3151 
3152     v9fs_string_init(&name);
3153     err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
3154     if (err < 0) {
3155         goto out_nofid;
3156     }
3157 
3158     if (name_is_illegal(name.data)) {
3159         err = -ENOENT;
3160         goto out_nofid;
3161     }
3162 
3163     if (!strcmp(".", name.data)) {
3164         err = -EINVAL;
3165         goto out_nofid;
3166     }
3167 
3168     if (!strcmp("..", name.data)) {
3169         err = -ENOTEMPTY;
3170         goto out_nofid;
3171     }
3172 
3173     if (flags & ~P9_DOTL_AT_REMOVEDIR) {
3174         err = -EINVAL;
3175         goto out_nofid;
3176     }
3177 
3178     if (flags & P9_DOTL_AT_REMOVEDIR) {
3179         rflags |= AT_REMOVEDIR;
3180     }
3181 
3182     dfidp = get_fid(pdu, dfid);
3183     if (dfidp == NULL) {
3184         err = -EINVAL;
3185         goto out_nofid;
3186     }
3187     /*
3188      * If the file is unlinked, we cannot reopen
3189      * it later, so don't reclaim the fd.
3190      */
3191     v9fs_path_init(&path);
3192     err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
3193     if (err < 0) {
3194         goto out_err;
3195     }
3196     err = v9fs_mark_fids_unreclaim(pdu, &path);
3197     if (err < 0) {
3198         goto out_err;
3199     }
3200     err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
3201     if (!err) {
3202         err = offset;
3203     }
3204 out_err:
3205     put_fid(pdu, dfidp);
3206     v9fs_path_free(&path);
3207 out_nofid:
3208     pdu_complete(pdu, err);
3209     v9fs_string_free(&name);
3210 }
3211 
3212 
3213 /* Only works with path name based fid */
3214 static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
3215                                              int32_t newdirfid,
3216                                              V9fsString *name)
3217 {
3218     int err = 0;
3219     V9fsPath new_path;
3220     V9fsFidState *tfidp;
3221     V9fsState *s = pdu->s;
3222     V9fsFidState *dirfidp = NULL;
3223     GHashTableIter iter;
3224     gpointer fid;
3225 
3226     v9fs_path_init(&new_path);
3227     if (newdirfid != -1) {
3228         dirfidp = get_fid(pdu, newdirfid);
3229         if (dirfidp == NULL) {
3230             return -ENOENT;
3231         }
3232         if (fidp->fid_type != P9_FID_NONE) {
3233             err = -EINVAL;
3234             goto out;
3235         }
3236         err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
3237         if (err < 0) {
3238             goto out;
3239         }
3240     } else {
3241         char *dir_name = g_path_get_dirname(fidp->path.data);
3242         V9fsPath dir_path;
3243 
3244         v9fs_path_init(&dir_path);
3245         v9fs_path_sprintf(&dir_path, "%s", dir_name);
3246         g_free(dir_name);
3247 
3248         err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path);
3249         v9fs_path_free(&dir_path);
3250         if (err < 0) {
3251             goto out;
3252         }
3253     }
3254     err = v9fs_co_rename(pdu, &fidp->path, &new_path);
3255     if (err < 0) {
3256         goto out;
3257     }
3258 
3259     /*
3260      * Fix up fids pointing to the old name so that
3261      * they point to the new name instead
3262      */
3263     g_hash_table_iter_init(&iter, s->fids);
3264     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3265         if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) {
3266             /* replace the name */
3267             v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data));
3268         }
3269     }
3270 out:
3271     if (dirfidp) {
3272         put_fid(pdu, dirfidp);
3273     }
3274     v9fs_path_free(&new_path);
3275     return err;
3276 }
3277 
3278 /* Only works with path name based fid */
3279 static void coroutine_fn v9fs_rename(void *opaque)
3280 {
3281     int32_t fid;
3282     ssize_t err = 0;
3283     size_t offset = 7;
3284     V9fsString name;
3285     int32_t newdirfid;
3286     V9fsFidState *fidp;
3287     V9fsPDU *pdu = opaque;
3288     V9fsState *s = pdu->s;
3289 
3290     v9fs_string_init(&name);
3291     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name);
3292     if (err < 0) {
3293         goto out_nofid;
3294     }
3295 
3296     if (name_is_illegal(name.data)) {
3297         err = -ENOENT;
3298         goto out_nofid;
3299     }
3300 
3301     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3302         err = -EISDIR;
3303         goto out_nofid;
3304     }
3305 
3306     fidp = get_fid(pdu, fid);
3307     if (fidp == NULL) {
3308         err = -ENOENT;
3309         goto out_nofid;
3310     }
3311     if (fidp->fid_type != P9_FID_NONE) {
3312         err = -EINVAL;
3313         goto out;
3314     }
3315     /* if fs driver is not path based, return EOPNOTSUPP */
3316     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3317         err = -EOPNOTSUPP;
3318         goto out;
3319     }
3320     v9fs_path_write_lock(s);
3321     err = v9fs_complete_rename(pdu, fidp, newdirfid, &name);
3322     v9fs_path_unlock(s);
3323     if (!err) {
3324         err = offset;
3325     }
3326 out:
3327     put_fid(pdu, fidp);
3328 out_nofid:
3329     pdu_complete(pdu, err);
3330     v9fs_string_free(&name);
3331 }
3332 
3333 static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
3334                                            V9fsString *old_name,
3335                                            V9fsPath *newdir,
3336                                            V9fsString *new_name)
3337 {
3338     V9fsFidState *tfidp;
3339     V9fsPath oldpath, newpath;
3340     V9fsState *s = pdu->s;
3341     int err;
3342     GHashTableIter iter;
3343     gpointer fid;
3344 
3345     v9fs_path_init(&oldpath);
3346     v9fs_path_init(&newpath);
3347     err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath);
3348     if (err < 0) {
3349         goto out;
3350     }
3351     err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath);
3352     if (err < 0) {
3353         goto out;
3354     }
3355 
3356     /*
3357      * Fix up fids pointing to the old name so that
3358      * they point to the new name instead
3359      */
3360     g_hash_table_iter_init(&iter, s->fids);
3361     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3362         if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) {
3363             /* replace the name */
3364             v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data));
3365         }
3366     }
3367 out:
3368     v9fs_path_free(&oldpath);
3369     v9fs_path_free(&newpath);
3370     return err;
3371 }
3372 
3373 static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
3374                                                V9fsString *old_name,
3375                                                int32_t newdirfid,
3376                                                V9fsString *new_name)
3377 {
3378     int err = 0;
3379     V9fsState *s = pdu->s;
3380     V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL;
3381 
3382     olddirfidp = get_fid(pdu, olddirfid);
3383     if (olddirfidp == NULL) {
3384         err = -ENOENT;
3385         goto out;
3386     }
3387     if (newdirfid != -1) {
3388         newdirfidp = get_fid(pdu, newdirfid);
3389         if (newdirfidp == NULL) {
3390             err = -ENOENT;
3391             goto out;
3392         }
3393     } else {
3394         newdirfidp = get_fid(pdu, olddirfid);
3395     }
3396 
3397     err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name,
3398                            &newdirfidp->path, new_name);
3399     if (err < 0) {
3400         goto out;
3401     }
3402     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
3403         /* Only for path-based fids do we need to do the fixup below */
3404         err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name,
3405                                  &newdirfidp->path, new_name);
3406     }
3407 out:
3408     if (olddirfidp) {
3409         put_fid(pdu, olddirfidp);
3410     }
3411     if (newdirfidp) {
3412         put_fid(pdu, newdirfidp);
3413     }
3414     return err;
3415 }
3416 
3417 static void coroutine_fn v9fs_renameat(void *opaque)
3418 {
3419     ssize_t err = 0;
3420     size_t offset = 7;
3421     V9fsPDU *pdu = opaque;
3422     V9fsState *s = pdu->s;
3423     int32_t olddirfid, newdirfid;
3424     V9fsString old_name, new_name;
3425 
3426     v9fs_string_init(&old_name);
3427     v9fs_string_init(&new_name);
3428     err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
3429                         &old_name, &newdirfid, &new_name);
3430     if (err < 0) {
3431         goto out_err;
3432     }
3433 
3434     if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
3435         err = -ENOENT;
3436         goto out_err;
3437     }
3438 
3439     if (!strcmp(".", old_name.data) || !strcmp("..", old_name.data) ||
3440         !strcmp(".", new_name.data) || !strcmp("..", new_name.data)) {
3441         err = -EISDIR;
3442         goto out_err;
3443     }
3444 
3445     v9fs_path_write_lock(s);
3446     err = v9fs_complete_renameat(pdu, olddirfid,
3447                                  &old_name, newdirfid, &new_name);
3448     v9fs_path_unlock(s);
3449     if (!err) {
3450         err = offset;
3451     }
3452 
3453 out_err:
3454     pdu_complete(pdu, err);
3455     v9fs_string_free(&old_name);
3456     v9fs_string_free(&new_name);
3457 }
3458 
3459 static void coroutine_fn v9fs_wstat(void *opaque)
3460 {
3461     int32_t fid;
3462     int err = 0;
3463     int16_t unused;
3464     V9fsStat v9stat;
3465     size_t offset = 7;
3466     struct stat stbuf;
3467     V9fsFidState *fidp;
3468     V9fsPDU *pdu = opaque;
3469     V9fsState *s = pdu->s;
3470 
3471     v9fs_stat_init(&v9stat);
3472     err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat);
3473     if (err < 0) {
3474         goto out_nofid;
3475     }
3476     trace_v9fs_wstat(pdu->tag, pdu->id, fid,
3477                      v9stat.mode, v9stat.atime, v9stat.mtime);
3478 
3479     fidp = get_fid(pdu, fid);
3480     if (fidp == NULL) {
3481         err = -EINVAL;
3482         goto out_nofid;
3483     }
3484     /* do we need to sync the file? */
3485     if (donttouch_stat(&v9stat)) {
3486         err = v9fs_co_fsync(pdu, fidp, 0);
3487         goto out;
3488     }
3489     if (v9stat.mode != -1) {
3490         uint32_t v9_mode;
3491         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
3492         if (err < 0) {
3493             goto out;
3494         }
3495         v9_mode = stat_to_v9mode(&stbuf);
3496         if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) !=
3497             (v9_mode & P9_STAT_MODE_TYPE_BITS)) {
3498             /* Attempting to change the type */
3499             err = -EIO;
3500             goto out;
3501         }
3502         err = v9fs_co_chmod(pdu, &fidp->path,
3503                             v9mode_to_mode(v9stat.mode,
3504                                            &v9stat.extension));
3505         if (err < 0) {
3506             goto out;
3507         }
3508     }
3509     if (v9stat.mtime != -1 || v9stat.atime != -1) {
3510         struct timespec times[2];
3511         if (v9stat.atime != -1) {
3512             times[0].tv_sec = v9stat.atime;
3513             times[0].tv_nsec = 0;
3514         } else {
3515             times[0].tv_nsec = UTIME_OMIT;
3516         }
3517         if (v9stat.mtime != -1) {
3518             times[1].tv_sec = v9stat.mtime;
3519             times[1].tv_nsec = 0;
3520         } else {
3521             times[1].tv_nsec = UTIME_OMIT;
3522         }
3523         err = v9fs_co_utimensat(pdu, &fidp->path, times);
3524         if (err < 0) {
3525             goto out;
3526         }
3527     }
3528     if (v9stat.n_gid != -1 || v9stat.n_uid != -1) {
3529         err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid);
3530         if (err < 0) {
3531             goto out;
3532         }
3533     }
3534     if (v9stat.name.size != 0) {
3535         v9fs_path_write_lock(s);
3536         err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name);
3537         v9fs_path_unlock(s);
3538         if (err < 0) {
3539             goto out;
3540         }
3541     }
3542     if (v9stat.length != -1) {
3543         err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length);
3544         if (err < 0) {
3545             goto out;
3546         }
3547     }
3548     err = offset;
3549 out:
3550     put_fid(pdu, fidp);
3551 out_nofid:
3552     v9fs_stat_free(&v9stat);
3553     pdu_complete(pdu, err);
3554 }
3555 
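/*
 * Scale host statfs figures for the client: the advertised block size is
 * f_bsize * bsize_factor with bsize_factor = (msize - P9_IOHDRSZ) / f_bsize
 * (clamped to at least 1), and the block counts are divided by the same
 * factor. Illustrative numbers: with a 4096-byte host block size, a 128 KiB
 * msize and P9_IOHDRSZ being only a small header overhead, bsize_factor
 * works out to 31, so the client sees 126976-byte blocks and roughly 1/31
 * as many of them.
 */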
3556 static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
3557 {
3558     uint32_t f_type;
3559     uint32_t f_bsize;
3560     uint64_t f_blocks;
3561     uint64_t f_bfree;
3562     uint64_t f_bavail;
3563     uint64_t f_files;
3564     uint64_t f_ffree;
3565     uint64_t fsid_val;
3566     uint32_t f_namelen;
3567     size_t offset = 7;
3568     int32_t bsize_factor;
3569 
3570     /*
3571      * compute bsize factor based on host file system block size
3572      * and client msize
3573      */
3574     bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize;
3575     if (!bsize_factor) {
3576         bsize_factor = 1;
3577     }
3578     f_type  = stbuf->f_type;
3579     f_bsize = stbuf->f_bsize;
3580     f_bsize *= bsize_factor;
3581     /*
3582      * f_bsize is adjusted (multiplied) by bsize_factor, so we need to
3583      * adjust (divide) the number of total, free and available blocks
3584      * by the same factor
3585      */
3586     f_blocks = stbuf->f_blocks / bsize_factor;
3587     f_bfree  = stbuf->f_bfree / bsize_factor;
3588     f_bavail = stbuf->f_bavail / bsize_factor;
3589     f_files  = stbuf->f_files;
3590     f_ffree  = stbuf->f_ffree;
3591 #ifdef CONFIG_DARWIN
3592     fsid_val = (unsigned int)stbuf->f_fsid.val[0] |
3593                (unsigned long long)stbuf->f_fsid.val[1] << 32;
3594     f_namelen = NAME_MAX;
3595 #else
3596     fsid_val = (unsigned int) stbuf->f_fsid.__val[0] |
3597                (unsigned long long)stbuf->f_fsid.__val[1] << 32;
3598     f_namelen = stbuf->f_namelen;
3599 #endif
3600 
3601     return pdu_marshal(pdu, offset, "ddqqqqqqd",
3602                        f_type, f_bsize, f_blocks, f_bfree,
3603                        f_bavail, f_files, f_ffree,
3604                        fsid_val, f_namelen);
3605 }
3606 
3607 static void coroutine_fn v9fs_statfs(void *opaque)
3608 {
3609     int32_t fid;
3610     ssize_t retval = 0;
3611     size_t offset = 7;
3612     V9fsFidState *fidp;
3613     struct statfs stbuf;
3614     V9fsPDU *pdu = opaque;
3615     V9fsState *s = pdu->s;
3616 
3617     retval = pdu_unmarshal(pdu, offset, "d", &fid);
3618     if (retval < 0) {
3619         goto out_nofid;
3620     }
3621     fidp = get_fid(pdu, fid);
3622     if (fidp == NULL) {
3623         retval = -ENOENT;
3624         goto out_nofid;
3625     }
3626     retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
3627     if (retval < 0) {
3628         goto out;
3629     }
3630     retval = v9fs_fill_statfs(s, pdu, &stbuf);
3631     if (retval < 0) {
3632         goto out;
3633     }
3634     retval += offset;
3635 out:
3636     put_fid(pdu, fidp);
3637 out_nofid:
3638     pdu_complete(pdu, retval);
3639 }
3640 
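/*
 * Tmknod follows the same shape as the other create-style requests in this
 * file: validate the name (illegal names -> ENOENT, "." / ".." -> EEXIST),
 * create the host object through the coroutine wrapper, then reply with the
 * new object's qid obtained via stat_to_qid().
 */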
3641 static void coroutine_fn v9fs_mknod(void *opaque)
3642 {
3643 
3644     int mode;
3645     gid_t gid;
3646     int32_t fid;
3647     V9fsQID qid;
3648     int err = 0;
3649     int major, minor;
3650     size_t offset = 7;
3651     V9fsString name;
3652     struct stat stbuf;
3653     V9fsFidState *fidp;
3654     V9fsPDU *pdu = opaque;
3655 
3656     v9fs_string_init(&name);
3657     err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
3658                         &major, &minor, &gid);
3659     if (err < 0) {
3660         goto out_nofid;
3661     }
3662     trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
3663 
3664     if (name_is_illegal(name.data)) {
3665         err = -ENOENT;
3666         goto out_nofid;
3667     }
3668 
3669     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3670         err = -EEXIST;
3671         goto out_nofid;
3672     }
3673 
3674     fidp = get_fid(pdu, fid);
3675     if (fidp == NULL) {
3676         err = -ENOENT;
3677         goto out_nofid;
3678     }
3679     err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
3680                         makedev(major, minor), mode, &stbuf);
3681     if (err < 0) {
3682         goto out;
3683     }
3684     err = stat_to_qid(pdu, &stbuf, &qid);
3685     if (err < 0) {
3686         goto out;
3687     }
3688     err = pdu_marshal(pdu, offset, "Q", &qid);
3689     if (err < 0) {
3690         goto out;
3691     }
3692     err += offset;
3693     trace_v9fs_mknod_return(pdu->tag, pdu->id,
3694                             qid.type, qid.version, qid.path);
3695 out:
3696     put_fid(pdu, fidp);
3697 out_nofid:
3698     pdu_complete(pdu, err);
3699     v9fs_string_free(&name);
3700 }
3701 
3702 /*
3703  * Implement POSIX byte-range locking.
3704  * Server-side handling of locking is very simple, because the 9p server in
3705  * QEMU can handle only one client, and most of the lock handling
3706  * (conflict detection, merging, etc.) is done by the client's VFS layer
3707  * itself, so nothing needs to be done in the QEMU 9p server's lock code
3708  * path. Hence, when a TLOCK request comes in, always return success.
3709  */
3710 static void coroutine_fn v9fs_lock(void *opaque)
3711 {
3712     V9fsFlock flock;
3713     size_t offset = 7;
3714     struct stat stbuf;
3715     V9fsFidState *fidp;
3716     int32_t fid, err = 0;
3717     V9fsPDU *pdu = opaque;
3718 
3719     v9fs_string_init(&flock.client_id);
3720     err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type,
3721                         &flock.flags, &flock.start, &flock.length,
3722                         &flock.proc_id, &flock.client_id);
3723     if (err < 0) {
3724         goto out_nofid;
3725     }
3726     trace_v9fs_lock(pdu->tag, pdu->id, fid,
3727                     flock.type, flock.start, flock.length);
3728 
3729 
3730     /* We only support the block flag for now (and even that is currently ignored) */
3731     if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) {
3732         err = -EINVAL;
3733         goto out_nofid;
3734     }
3735     fidp = get_fid(pdu, fid);
3736     if (fidp == NULL) {
3737         err = -ENOENT;
3738         goto out_nofid;
3739     }
3740     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3741     if (err < 0) {
3742         goto out;
3743     }
3744     err = pdu_marshal(pdu, offset, "b", P9_LOCK_SUCCESS);
3745     if (err < 0) {
3746         goto out;
3747     }
3748     err += offset;
3749     trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS);
3750 out:
3751     put_fid(pdu, fidp);
3752 out_nofid:
3753     pdu_complete(pdu, err);
3754     v9fs_string_free(&flock.client_id);
3755 }
3756 
3757 /*
3758  * When a TGETLOCK request comes, always return success because all lock
3759  * handling is done by the client's VFS layer.
3760  */
3761 static void coroutine_fn v9fs_getlock(void *opaque)
3762 {
3763     size_t offset = 7;
3764     struct stat stbuf;
3765     V9fsFidState *fidp;
3766     V9fsGetlock glock;
3767     int32_t fid, err = 0;
3768     V9fsPDU *pdu = opaque;
3769 
3770     v9fs_string_init(&glock.client_id);
3771     err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type,
3772                         &glock.start, &glock.length, &glock.proc_id,
3773                         &glock.client_id);
3774     if (err < 0) {
3775         goto out_nofid;
3776     }
3777     trace_v9fs_getlock(pdu->tag, pdu->id, fid,
3778                        glock.type, glock.start, glock.length);
3779 
3780     fidp = get_fid(pdu, fid);
3781     if (fidp == NULL) {
3782         err = -ENOENT;
3783         goto out_nofid;
3784     }
3785     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3786     if (err < 0) {
3787         goto out;
3788     }
3789     glock.type = P9_LOCK_TYPE_UNLCK;
3790     err = pdu_marshal(pdu, offset, "bqqds", glock.type,
3791                           glock.start, glock.length, glock.proc_id,
3792                           &glock.client_id);
3793     if (err < 0) {
3794         goto out;
3795     }
3796     err += offset;
3797     trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start,
3798                               glock.length, glock.proc_id);
3799 out:
3800     put_fid(pdu, fidp);
3801 out_nofid:
3802     pdu_complete(pdu, err);
3803     v9fs_string_free(&glock.client_id);
3804 }
3805 
3806 static void coroutine_fn v9fs_mkdir(void *opaque)
3807 {
3808     V9fsPDU *pdu = opaque;
3809     size_t offset = 7;
3810     int32_t fid;
3811     struct stat stbuf;
3812     V9fsQID qid;
3813     V9fsString name;
3814     V9fsFidState *fidp;
3815     gid_t gid;
3816     int mode;
3817     int err = 0;
3818 
3819     v9fs_string_init(&name);
3820     err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid);
3821     if (err < 0) {
3822         goto out_nofid;
3823     }
3824     trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid);
3825 
3826     if (name_is_illegal(name.data)) {
3827         err = -ENOENT;
3828         goto out_nofid;
3829     }
3830 
3831     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3832         err = -EEXIST;
3833         goto out_nofid;
3834     }
3835 
3836     fidp = get_fid(pdu, fid);
3837     if (fidp == NULL) {
3838         err = -ENOENT;
3839         goto out_nofid;
3840     }
3841     err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf);
3842     if (err < 0) {
3843         goto out;
3844     }
3845     err = stat_to_qid(pdu, &stbuf, &qid);
3846     if (err < 0) {
3847         goto out;
3848     }
3849     err = pdu_marshal(pdu, offset, "Q", &qid);
3850     if (err < 0) {
3851         goto out;
3852     }
3853     err += offset;
3854     trace_v9fs_mkdir_return(pdu->tag, pdu->id,
3855                             qid.type, qid.version, qid.path, err);
3856 out:
3857     put_fid(pdu, fidp);
3858 out_nofid:
3859     pdu_complete(pdu, err);
3860     v9fs_string_free(&name);
3861 }
3862 
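/*
 * Txattrwalk: clone the file fid's path into a new xattr fid. An empty
 * attribute name means "list all xattr names" (llistxattr); a non-empty
 * name selects a single attribute (lgetxattr). Either way the size is
 * probed first with a NULL buffer, a buffer of that size is attached to the
 * new fid and filled, and the size is returned; the client is then expected
 * to fetch the bytes with Tread on the new fid.
 */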
3863 static void coroutine_fn v9fs_xattrwalk(void *opaque)
3864 {
3865     int64_t size;
3866     V9fsString name;
3867     ssize_t err = 0;
3868     size_t offset = 7;
3869     int32_t fid, newfid;
3870     V9fsFidState *file_fidp;
3871     V9fsFidState *xattr_fidp = NULL;
3872     V9fsPDU *pdu = opaque;
3873     V9fsState *s = pdu->s;
3874 
3875     v9fs_string_init(&name);
3876     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name);
3877     if (err < 0) {
3878         goto out_nofid;
3879     }
3880     trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data);
3881 
3882     file_fidp = get_fid(pdu, fid);
3883     if (file_fidp == NULL) {
3884         err = -ENOENT;
3885         goto out_nofid;
3886     }
3887     xattr_fidp = alloc_fid(s, newfid);
3888     if (xattr_fidp == NULL) {
3889         err = -EINVAL;
3890         goto out;
3891     }
3892     v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
3893     if (!v9fs_string_size(&name)) {
3894         /*
3895          * listxattr request. Get the size first
3896          */
3897         size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0);
3898         if (size < 0) {
3899             err = size;
3900             clunk_fid(s, xattr_fidp->fid);
3901             goto out;
3902         }
3903         /*
3904          * Read the xattr name list
3905          */
3906         xattr_fidp->fs.xattr.len = size;
3907         xattr_fidp->fid_type = P9_FID_XATTR;
3908         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3909         xattr_fidp->fs.xattr.value = g_malloc0(size);
3910         if (size) {
3911             err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
3912                                      xattr_fidp->fs.xattr.value,
3913                                      xattr_fidp->fs.xattr.len);
3914             if (err < 0) {
3915                 clunk_fid(s, xattr_fidp->fid);
3916                 goto out;
3917             }
3918         }
3919         err = pdu_marshal(pdu, offset, "q", size);
3920         if (err < 0) {
3921             goto out;
3922         }
3923         err += offset;
3924     } else {
3925         /*
3926          * specific xattr fid. We check for the xattr's
3927          * presence and also collect its size
3928          */
3929         size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3930                                  &name, NULL, 0);
3931         if (size < 0) {
3932             err = size;
3933             clunk_fid(s, xattr_fidp->fid);
3934             goto out;
3935         }
3936         /*
3937          * Read the xattr value
3938          */
3939         xattr_fidp->fs.xattr.len = size;
3940         xattr_fidp->fid_type = P9_FID_XATTR;
3941         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3942         xattr_fidp->fs.xattr.value = g_malloc0(size);
3943         if (size) {
3944             err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3945                                     &name, xattr_fidp->fs.xattr.value,
3946                                     xattr_fidp->fs.xattr.len);
3947             if (err < 0) {
3948                 clunk_fid(s, xattr_fidp->fid);
3949                 goto out;
3950             }
3951         }
3952         err = pdu_marshal(pdu, offset, "q", size);
3953         if (err < 0) {
3954             goto out;
3955         }
3956         err += offset;
3957     }
3958     trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size);
3959 out:
3960     put_fid(pdu, file_fidp);
3961     if (xattr_fidp) {
3962         put_fid(pdu, xattr_fidp);
3963     }
3964 out_nofid:
3965     pdu_complete(pdu, err);
3966     v9fs_string_free(&name);
3967 }
3968 
3969 #if defined(CONFIG_LINUX)
3970 /* Currently, only Linux has XATTR_SIZE_MAX */
3971 #define P9_XATTR_SIZE_MAX XATTR_SIZE_MAX
3972 #elif defined(CONFIG_DARWIN)
3973 /*
3974  * Darwin doesn't seem to define a maximum xattr size in its user-space
3975  * headers, so we manually set it to 64k here.
3976  *
3977  * Having no limit at all can lead to QEMU crashing during large g_malloc()
3978  * calls. Because QEMU does not currently support macOS guests, this
3979  * preliminary solution works only because it mirrors the limit imposed on
3980  * Linux guests.
3981  */
3982 #define P9_XATTR_SIZE_MAX 65536
3983 #else
3984 #error Missing definition for P9_XATTR_SIZE_MAX for this host system
3985 #endif
3986 
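/*
 * Txattrcreate: turn an otherwise plain fid into an xattr fid that buffers
 * the attribute value. Only the name, flags and declared size are recorded
 * here; the value bytes are expected to arrive through subsequent Twrite
 * requests on this fid, and the attribute is presumably flushed to the host
 * when the fid is clunked (handled on the xattr fid clunk path elsewhere in
 * this file).
 */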
3987 static void coroutine_fn v9fs_xattrcreate(void *opaque)
3988 {
3989     int flags, rflags = 0;
3990     int32_t fid;
3991     uint64_t size;
3992     ssize_t err = 0;
3993     V9fsString name;
3994     size_t offset = 7;
3995     V9fsFidState *file_fidp;
3996     V9fsFidState *xattr_fidp;
3997     V9fsPDU *pdu = opaque;
3998 
3999     v9fs_string_init(&name);
4000     err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags);
4001     if (err < 0) {
4002         goto out_nofid;
4003     }
4004     trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
4005 
4006     if (flags & ~(P9_XATTR_CREATE | P9_XATTR_REPLACE)) {
4007         err = -EINVAL;
4008         goto out_nofid;
4009     }
4010 
4011     if (flags & P9_XATTR_CREATE) {
4012         rflags |= XATTR_CREATE;
4013     }
4014 
4015     if (flags & P9_XATTR_REPLACE) {
4016         rflags |= XATTR_REPLACE;
4017     }
4018 
4019     if (size > P9_XATTR_SIZE_MAX) {
4020         err = -E2BIG;
4021         goto out_nofid;
4022     }
4023 
4024     file_fidp = get_fid(pdu, fid);
4025     if (file_fidp == NULL) {
4026         err = -EINVAL;
4027         goto out_nofid;
4028     }
4029     if (file_fidp->fid_type != P9_FID_NONE) {
4030         err = -EINVAL;
4031         goto out_put_fid;
4032     }
4033 
4034     /* Make the file fid point to xattr */
4035     xattr_fidp = file_fidp;
4036     xattr_fidp->fid_type = P9_FID_XATTR;
4037     xattr_fidp->fs.xattr.copied_len = 0;
4038     xattr_fidp->fs.xattr.xattrwalk_fid = false;
4039     xattr_fidp->fs.xattr.len = size;
4040     xattr_fidp->fs.xattr.flags = rflags;
4041     v9fs_string_init(&xattr_fidp->fs.xattr.name);
4042     v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
4043     xattr_fidp->fs.xattr.value = g_malloc0(size);
4044     err = offset;
4045 out_put_fid:
4046     put_fid(pdu, file_fidp);
4047 out_nofid:
4048     pdu_complete(pdu, err);
4049     v9fs_string_free(&name);
4050 }
4051 
4052 static void coroutine_fn v9fs_readlink(void *opaque)
4053 {
4054     V9fsPDU *pdu = opaque;
4055     size_t offset = 7;
4056     V9fsString target;
4057     int32_t fid;
4058     int err = 0;
4059     V9fsFidState *fidp;
4060 
4061     err = pdu_unmarshal(pdu, offset, "d", &fid);
4062     if (err < 0) {
4063         goto out_nofid;
4064     }
4065     trace_v9fs_readlink(pdu->tag, pdu->id, fid);
4066     fidp = get_fid(pdu, fid);
4067     if (fidp == NULL) {
4068         err = -ENOENT;
4069         goto out_nofid;
4070     }
4071 
4072     v9fs_string_init(&target);
4073     err = v9fs_co_readlink(pdu, &fidp->path, &target);
4074     if (err < 0) {
4075         goto out;
4076     }
4077     err = pdu_marshal(pdu, offset, "s", &target);
4078     if (err < 0) {
4079         v9fs_string_free(&target);
4080         goto out;
4081     }
4082     err += offset;
4083     trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data);
4084     v9fs_string_free(&target);
4085 out:
4086     put_fid(pdu, fidp);
4087 out_nofid:
4088     pdu_complete(pdu, err);
4089 }
4090 
4091 static CoroutineEntry *pdu_co_handlers[] = {
4092     [P9_TREADDIR] = v9fs_readdir,
4093     [P9_TSTATFS] = v9fs_statfs,
4094     [P9_TGETATTR] = v9fs_getattr,
4095     [P9_TSETATTR] = v9fs_setattr,
4096     [P9_TXATTRWALK] = v9fs_xattrwalk,
4097     [P9_TXATTRCREATE] = v9fs_xattrcreate,
4098     [P9_TMKNOD] = v9fs_mknod,
4099     [P9_TRENAME] = v9fs_rename,
4100     [P9_TLOCK] = v9fs_lock,
4101     [P9_TGETLOCK] = v9fs_getlock,
4102     [P9_TRENAMEAT] = v9fs_renameat,
4103     [P9_TREADLINK] = v9fs_readlink,
4104     [P9_TUNLINKAT] = v9fs_unlinkat,
4105     [P9_TMKDIR] = v9fs_mkdir,
4106     [P9_TVERSION] = v9fs_version,
4107     [P9_TLOPEN] = v9fs_open,
4108     [P9_TATTACH] = v9fs_attach,
4109     [P9_TSTAT] = v9fs_stat,
4110     [P9_TWALK] = v9fs_walk,
4111     [P9_TCLUNK] = v9fs_clunk,
4112     [P9_TFSYNC] = v9fs_fsync,
4113     [P9_TOPEN] = v9fs_open,
4114     [P9_TREAD] = v9fs_read,
4115 #if 0
4116     [P9_TAUTH] = v9fs_auth,
4117 #endif
4118     [P9_TFLUSH] = v9fs_flush,
4119     [P9_TLINK] = v9fs_link,
4120     [P9_TSYMLINK] = v9fs_symlink,
4121     [P9_TCREATE] = v9fs_create,
4122     [P9_TLCREATE] = v9fs_lcreate,
4123     [P9_TWRITE] = v9fs_write,
4124     [P9_TWSTAT] = v9fs_wstat,
4125     [P9_TREMOVE] = v9fs_remove,
4126 };
4127 
4128 static void coroutine_fn v9fs_op_not_supp(void *opaque)
4129 {
4130     V9fsPDU *pdu = opaque;
4131     pdu_complete(pdu, -EOPNOTSUPP);
4132 }
4133 
4134 static void coroutine_fn v9fs_fs_ro(void *opaque)
4135 {
4136     V9fsPDU *pdu = opaque;
4137     pdu_complete(pdu, -EROFS);
4138 }
4139 
4140 static inline bool is_read_only_op(V9fsPDU *pdu)
4141 {
4142     switch (pdu->id) {
4143     case P9_TREADDIR:
4144     case P9_TSTATFS:
4145     case P9_TGETATTR:
4146     case P9_TXATTRWALK:
4147     case P9_TLOCK:
4148     case P9_TGETLOCK:
4149     case P9_TREADLINK:
4150     case P9_TVERSION:
4151     case P9_TLOPEN:
4152     case P9_TATTACH:
4153     case P9_TSTAT:
4154     case P9_TWALK:
4155     case P9_TCLUNK:
4156     case P9_TFSYNC:
4157     case P9_TOPEN:
4158     case P9_TREAD:
4159     case P9_TAUTH:
4160     case P9_TFLUSH:
4161         return true;
4162     default:
4163         return false;
4164     }
4165 }
4166 
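/*
 * Dispatch: pdu->id indexes pdu_co_handlers[] above. Requests with no
 * handler are answered with -EOPNOTSUPP via v9fs_op_not_supp(), and
 * write-type requests on a read-only export are diverted to v9fs_fs_ro()
 * (-EROFS) based on is_read_only_op(). Each request then runs in its own
 * coroutine.
 */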
4167 void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr)
4168 {
4169     Coroutine *co;
4170     CoroutineEntry *handler;
4171     V9fsState *s = pdu->s;
4172 
4173     pdu->size = le32_to_cpu(hdr->size_le);
4174     pdu->id = hdr->id;
4175     pdu->tag = le16_to_cpu(hdr->tag_le);
4176 
4177     if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
4178         (pdu_co_handlers[pdu->id] == NULL)) {
4179         handler = v9fs_op_not_supp;
4180     } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
4181         handler = v9fs_fs_ro;
4182     } else {
4183         handler = pdu_co_handlers[pdu->id];
4184     }
4185 
4186     qemu_co_queue_init(&pdu->complete);
4187     co = qemu_coroutine_create(handler, pdu);
4188     qemu_coroutine_enter(co);
4189 }
4190 
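/*
 * Common realize path shared by the transports. The fsdev id and mount_tag
 * validated below typically come from a command line along the lines of
 * (illustrative example only):
 *   -fsdev local,id=fsdev0,path=/srv/share,security_model=mapped
 *   -device virtio-9p-pci,fsdev=fsdev0,mount_tag=hostshare
 */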
4191 /* Returns 0 on success, 1 on failure. */
4192 int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
4193                                Error **errp)
4194 {
4195     ERRP_GUARD();
4196     int i, len;
4197     struct stat stat;
4198     FsDriverEntry *fse;
4199     V9fsPath path;
4200     int rc = 1;
4201 
4202     assert(!s->transport);
4203     s->transport = t;
4204 
4205     /* initialize pdu allocator */
4206     QLIST_INIT(&s->free_list);
4207     QLIST_INIT(&s->active_list);
4208     for (i = 0; i < MAX_REQ; i++) {
4209         QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
4210         s->pdus[i].s = s;
4211         s->pdus[i].idx = i;
4212     }
4213 
4214     v9fs_path_init(&path);
4215 
4216     fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
4217 
4218     if (!fse) {
4219         /* We don't have a fsdev identified by fsdev_id */
4220         error_setg(errp, "9pfs device couldn't find fsdev with the "
4221                    "id = %s",
4222                    s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
4223         goto out;
4224     }
4225 
4226     if (!s->fsconf.tag) {
4227         /* we haven't specified a mount_tag */
4228         error_setg(errp, "fsdev with id %s needs mount_tag arguments",
4229                    s->fsconf.fsdev_id);
4230         goto out;
4231     }
4232 
4233     s->ctx.export_flags = fse->export_flags;
4234     s->ctx.fs_root = g_strdup(fse->path);
4235     s->ctx.exops.get_st_gen = NULL;
4236     len = strlen(s->fsconf.tag);
4237     if (len > MAX_TAG_LEN - 1) {
4238         error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
4239                    "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
4240         goto out;
4241     }
4242 
4243     s->tag = g_strdup(s->fsconf.tag);
4244     s->ctx.uid = -1;
4245 
4246     s->ops = fse->ops;
4247 
4248     s->ctx.fmode = fse->fmode;
4249     s->ctx.dmode = fse->dmode;
4250 
4251     s->fids = g_hash_table_new(NULL, NULL);
4252     qemu_co_rwlock_init(&s->rename_lock);
4253 
4254     if (s->ops->init(&s->ctx, errp) < 0) {
4255         error_prepend(errp, "cannot initialize fsdev '%s': ",
4256                       s->fsconf.fsdev_id);
4257         goto out;
4258     }
4259 
4260     /*
4261      * Check the details of the export path. We need to use the fs
4262      * driver callback to do that. Since we are in the init path, we
4263      * don't use coroutines here.
4264      */
4265     if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
4266         error_setg(errp,
4267                    "error in converting name to path %s", strerror(errno));
4268         goto out;
4269     }
4270     if (s->ops->lstat(&s->ctx, &path, &stat)) {
4271         error_setg(errp, "share path %s does not exist", fse->path);
4272         goto out;
4273     } else if (!S_ISDIR(stat.st_mode)) {
4274         error_setg(errp, "share path %s is not a directory", fse->path);
4275         goto out;
4276     }
4277 
4278     s->dev_id = stat.st_dev;
4279 
4280     /* init inode remapping: */
4281     /* hash table for variable-length inode suffixes */
4282     qpd_table_init(&s->qpd_table);
4283     /* hash table for slow/full inode remapping (most users won't need it) */
4284     qpf_table_init(&s->qpf_table);
4285     /* hash table for quick inode remapping */
4286     qpp_table_init(&s->qpp_table);
4287     s->qp_ndevices = 0;
4288     s->qp_affix_next = 1; /* reserve 0 to detect overflow */
4289     s->qp_fullpath_next = 1;
4290 
4291     s->ctx.fst = &fse->fst;
4292     fsdev_throttle_init(s->ctx.fst);
4293 
4294     rc = 0;
4295 out:
4296     if (rc) {
4297         v9fs_device_unrealize_common(s);
4298     }
4299     v9fs_path_free(&path);
4300     return rc;
4301 }
4302 
4303 void v9fs_device_unrealize_common(V9fsState *s)
4304 {
4305     if (s->ops && s->ops->cleanup) {
4306         s->ops->cleanup(&s->ctx);
4307     }
4308     if (s->ctx.fst) {
4309         fsdev_throttle_cleanup(s->ctx.fst);
4310     }
4311     if (s->fids) {
4312         g_hash_table_destroy(s->fids);
4313         s->fids = NULL;
4314     }
4315     g_free(s->tag);
4316     qp_table_destroy(&s->qpd_table);
4317     qp_table_destroy(&s->qpp_table);
4318     qp_table_destroy(&s->qpf_table);
4319     g_free(s->ctx.fs_root);
4320 }
4321 
4322 typedef struct VirtfsCoResetData {
4323     V9fsPDU pdu;
4324     bool done;
4325 } VirtfsCoResetData;
4326 
4327 static void coroutine_fn virtfs_co_reset(void *opaque)
4328 {
4329     VirtfsCoResetData *data = opaque;
4330 
4331     virtfs_reset(&data->pdu);
4332     data->done = true;
4333 }
4334 
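/*
 * v9fs_reset() first drains all in-flight PDUs by polling the main
 * AioContext, then runs virtfs_reset() inside a coroutine (it may yield
 * while clunking fids) and keeps polling until that coroutine signals
 * completion through data.done.
 */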
4335 void v9fs_reset(V9fsState *s)
4336 {
4337     VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
4338     Coroutine *co;
4339 
4340     while (!QLIST_EMPTY(&s->active_list)) {
4341         aio_poll(qemu_get_aio_context(), true);
4342     }
4343 
4344     co = qemu_coroutine_create(virtfs_co_reset, &data);
4345     qemu_coroutine_enter(co);
4346 
4347     while (!data.done) {
4348         aio_poll(qemu_get_aio_context(), true);
4349     }
4350 }
4351 
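/*
 * Reserve part of the process fd limit for QEMU itself. Illustrative
 * numbers: with a soft RLIMIT_NOFILE of 1024, open_fd_hw becomes
 * 1024 - MIN(400, 341) = 683 and open_fd_rc becomes 512; these thresholds
 * drive the fd reclaim logic used elsewhere in the 9p code.
 */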
4352 static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
4353 {
4354     struct rlimit rlim;
4355     if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
4356         error_report("Failed to get the resource limit");
4357         exit(1);
4358     }
4359     open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3);
4360     open_fd_rc = rlim.rlim_cur / 2;
4361 }
4362