xref: /openbmc/qemu/hw/9pfs/9p.c (revision a0f326228a927d65e3b2dc23eab17b88aadc1a03)
1 /*
2  * Virtio 9p backend
3  *
4  * Copyright IBM, Corp. 2010
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 /*
15  * Not so fast! You might want to read the 9p developer docs first:
16  * https://wiki.qemu.org/Documentation/9p
17  */
18 
19 #include "qemu/osdep.h"
20 #ifdef CONFIG_LINUX
21 #include <linux/limits.h>
22 #endif
23 #include <glib/gprintf.h>
24 #include "hw/virtio/virtio.h"
25 #include "qapi/error.h"
26 #include "qemu/error-report.h"
27 #include "qemu/iov.h"
28 #include "qemu/main-loop.h"
29 #include "qemu/sockets.h"
30 #include "virtio-9p.h"
31 #include "fsdev/qemu-fsdev.h"
32 #include "9p-xattr.h"
33 #include "9p-util.h"
34 #include "coth.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qemu/xxhash.h"
38 #include <math.h>
39 
40 int open_fd_hw;
41 int total_open_fd;
42 static int open_fd_rc;
43 
44 enum {
45     Oread   = 0x00,
46     Owrite  = 0x01,
47     Ordwr   = 0x02,
48     Oexec   = 0x03,
49     Oexcl   = 0x04,
50     Otrunc  = 0x10,
51     Orexec  = 0x20,
52     Orclose = 0x40,
53     Oappend = 0x80,
54 };
55 
56 P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free);
57 
58 static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
59 {
60     ssize_t ret;
61     va_list ap;
62 
63     va_start(ap, fmt);
64     ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
65     va_end(ap);
66 
67     return ret;
68 }
69 
70 static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
71 {
72     ssize_t ret;
73     va_list ap;
74 
75     va_start(ap, fmt);
76     ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
77     va_end(ap);
78 
79     return ret;
80 }
81 
82 static int omode_to_uflags(int8_t mode)
83 {
84     int ret = 0;
85 
86     switch (mode & 3) {
87     case Oread:
88         ret = O_RDONLY;
89         break;
90     case Ordwr:
91         ret = O_RDWR;
92         break;
93     case Owrite:
94         ret = O_WRONLY;
95         break;
96     case Oexec:
97         ret = O_RDONLY;
98         break;
99     }
100 
101     if (mode & Otrunc) {
102         ret |= O_TRUNC;
103     }
104 
105     if (mode & Oappend) {
106         ret |= O_APPEND;
107     }
108 
109     if (mode & Oexcl) {
110         ret |= O_EXCL;
111     }
112 
113     return ret;
114 }
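
/*
 * Worked example (for illustration only, derived from the mapping above):
 * a legacy 9P open with mode (Owrite | Otrunc) == 0x11 yields the host
 * flags O_WRONLY | O_TRUNC, while (Oread | Oappend) == 0x80 yields
 * O_RDONLY | O_APPEND.
 */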
115 
116 typedef struct DotlOpenflagMap {
117     int dotl_flag;
118     int open_flag;
119 } DotlOpenflagMap;
120 
121 static int dotl_to_open_flags(int flags)
122 {
123     int i;
124     /*
125      * We have the same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
126      * and P9_DOTL_NOACCESS
127      */
128     int oflags = flags & O_ACCMODE;
129 
130     DotlOpenflagMap dotl_oflag_map[] = {
131         { P9_DOTL_CREATE, O_CREAT },
132         { P9_DOTL_EXCL, O_EXCL },
133         { P9_DOTL_NOCTTY , O_NOCTTY },
134         { P9_DOTL_TRUNC, O_TRUNC },
135         { P9_DOTL_APPEND, O_APPEND },
136         { P9_DOTL_NONBLOCK, O_NONBLOCK } ,
137         { P9_DOTL_DSYNC, O_DSYNC },
138         { P9_DOTL_FASYNC, FASYNC },
139 #ifndef CONFIG_DARWIN
140         { P9_DOTL_NOATIME, O_NOATIME },
141         /*
142          *  On Darwin, we could map to F_NOCACHE, which is
143          *  similar, but doesn't quite have the same
144          *  semantics. However, we don't support O_DIRECT
145          *  even on linux at the moment, so we just ignore
146          *  it here.
147          */
148         { P9_DOTL_DIRECT, O_DIRECT },
149 #endif
150         { P9_DOTL_LARGEFILE, O_LARGEFILE },
151         { P9_DOTL_DIRECTORY, O_DIRECTORY },
152         { P9_DOTL_NOFOLLOW, O_NOFOLLOW },
153         { P9_DOTL_SYNC, O_SYNC },
154     };
155 
156     for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
157         if (flags & dotl_oflag_map[i].dotl_flag) {
158             oflags |= dotl_oflag_map[i].open_flag;
159         }
160     }
161 
162     return oflags;
163 }
164 
165 void cred_init(FsCred *credp)
166 {
167     credp->fc_uid = -1;
168     credp->fc_gid = -1;
169     credp->fc_mode = -1;
170     credp->fc_rdev = -1;
171 }
172 
173 static int get_dotl_openflags(V9fsState *s, int oflags)
174 {
175     int flags;
176     /*
177      * Filter the client open flags
178      */
179     flags = dotl_to_open_flags(oflags);
180     flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT);
181 #ifndef CONFIG_DARWIN
182     /*
183      * Ignore direct disk access hint until the server supports it.
184      */
185     flags &= ~O_DIRECT;
186 #endif
187     return flags;
188 }
189 
190 void v9fs_path_init(V9fsPath *path)
191 {
192     path->data = NULL;
193     path->size = 0;
194 }
195 
196 void v9fs_path_free(V9fsPath *path)
197 {
198     g_free(path->data);
199     path->data = NULL;
200     path->size = 0;
201 }
202 
203 
204 void G_GNUC_PRINTF(2, 3)
205 v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
206 {
207     va_list ap;
208 
209     v9fs_path_free(path);
210 
211     va_start(ap, fmt);
212     /* Bump the size for including terminating NULL */
213     path->size = g_vasprintf(&path->data, fmt, ap) + 1;
214     va_end(ap);
215 }
216 
217 void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src)
218 {
219     v9fs_path_free(dst);
220     dst->size = src->size;
221     dst->data = g_memdup(src->data, src->size);
222 }
223 
224 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
225                       const char *name, V9fsPath *path)
226 {
227     int err;
228     err = s->ops->name_to_path(&s->ctx, dirpath, name, path);
229     if (err < 0) {
230         err = -errno;
231     }
232     return err;
233 }
234 
235 /*
236  * Return TRUE if s1 is an ancestor of s2.
237  *
238  * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
239  * As a special case, we treat s1 as an ancestor of s2 if they are the same.
240  */
241 static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2)
242 {
243     if (!strncmp(s1->data, s2->data, s1->size - 1)) {
244         if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') {
245             return 1;
246         }
247     }
248     return 0;
249 }
250 
251 static size_t v9fs_string_size(V9fsString *str)
252 {
253     return str->size;
254 }
255 
256 /*
257  * returns 0 if fid got re-opened, 1 if not, < 0 on error
258  */
259 static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
260 {
261     int err = 1;
262     if (f->fid_type == P9_FID_FILE) {
263         if (f->fs.fd == -1) {
264             do {
265                 err = v9fs_co_open(pdu, f, f->open_flags);
266             } while (err == -EINTR && !pdu->cancelled);
267         }
268     } else if (f->fid_type == P9_FID_DIR) {
269         if (f->fs.dir.stream == NULL) {
270             do {
271                 err = v9fs_co_opendir(pdu, f);
272             } while (err == -EINTR && !pdu->cancelled);
273         }
274     }
275     return err;
276 }
277 
278 static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
279 {
280     int err;
281     V9fsFidState *f;
282     V9fsState *s = pdu->s;
283 
284     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
285     if (f) {
286         BUG_ON(f->clunked);
287         /*
288          * Update the fid ref upfront so that
289          * we don't get reclaimed when we yield
290          * in open later.
291          */
292         f->ref++;
293         /*
294          * check whether we need to reopen the
295          * file. We might have closed the fd
296          * while trying to free up some file
297          * descriptors.
298          */
299         err = v9fs_reopen_fid(pdu, f);
300         if (err < 0) {
301             f->ref--;
302             return NULL;
303         }
304         /*
305          * Mark the fid as referenced so that the LRU
306          * reclaim won't close the file descriptor
307          */
308         f->flags |= FID_REFERENCED;
309         return f;
310     }
311     return NULL;
312 }
313 
314 static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
315 {
316     V9fsFidState *f;
317 
318     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
319     if (f) {
320         /* If fid is already there return NULL */
321         BUG_ON(f->clunked);
322         return NULL;
323     }
324     f = g_new0(V9fsFidState, 1);
325     f->fid = fid;
326     f->fid_type = P9_FID_NONE;
327     f->ref = 1;
328     /*
329      * Mark the fid as referenced so that the LRU
330      * reclaim won't close the file descriptor
331      */
332     f->flags |= FID_REFERENCED;
333     g_hash_table_insert(s->fids, GINT_TO_POINTER(fid), f);
334 
335     v9fs_readdir_init(s->proto_version, &f->fs.dir);
336     v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir);
337 
338     return f;
339 }
340 
341 static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
342 {
343     int retval = 0;
344 
345     if (fidp->fs.xattr.xattrwalk_fid) {
346         /* getxattr/listxattr fid */
347         goto free_value;
348     }
349     /*
350      * If this is a fid for setxattr, clunk should
351      * result in a setxattr local call.
352      */
353     if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
354         /* clunk after partial write */
355         retval = -EINVAL;
356         goto free_out;
357     }
358     if (fidp->fs.xattr.len) {
359         retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
360                                    fidp->fs.xattr.value,
361                                    fidp->fs.xattr.len,
362                                    fidp->fs.xattr.flags);
363     } else {
364         retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
365     }
366 free_out:
367     v9fs_string_free(&fidp->fs.xattr.name);
368 free_value:
369     g_free(fidp->fs.xattr.value);
370     return retval;
371 }
372 
373 static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
374 {
375     int retval = 0;
376 
377     if (fidp->fid_type == P9_FID_FILE) {
378         /* If we reclaimed the fd no need to close */
379         if (fidp->fs.fd != -1) {
380             retval = v9fs_co_close(pdu, &fidp->fs);
381         }
382     } else if (fidp->fid_type == P9_FID_DIR) {
383         if (fidp->fs.dir.stream != NULL) {
384             retval = v9fs_co_closedir(pdu, &fidp->fs);
385         }
386     } else if (fidp->fid_type == P9_FID_XATTR) {
387         retval = v9fs_xattr_fid_clunk(pdu, fidp);
388     }
389     v9fs_path_free(&fidp->path);
390     g_free(fidp);
391     return retval;
392 }
393 
394 static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
395 {
396     BUG_ON(!fidp->ref);
397     fidp->ref--;
398     /*
399      * Don't free the fid if it is in reclaim list
400      */
401     if (!fidp->ref && fidp->clunked) {
402         if (fidp->fid == pdu->s->root_fid) {
403             /*
404              * If the clunked fid is the root fid then we
405              * have unmounted the fs on the client side.
406              * Delete the migration blocker. Ideally, this
407              * should be hooked to a transport close notification.
408              */
409             migrate_del_blocker(&pdu->s->migration_blocker);
410         }
411         return free_fid(pdu, fidp);
412     }
413     return 0;
414 }
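
/*
 * Usage sketch (illustrative only): the request handlers further below pair
 * get_fid()/put_fid() around the actual filesystem work, roughly like
 *
 *     fidp = get_fid(pdu, fid);
 *     if (fidp == NULL) {
 *         err = -ENOENT;
 *         goto out_nofid;
 *     }
 *     ... perform coroutine I/O on fidp ...
 *     put_fid(pdu, fidp);
 *
 * The extra reference taken by get_fid() keeps the fid from being reclaimed
 * or freed while the handler yields in a coroutine.
 */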
415 
416 static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
417 {
418     V9fsFidState *fidp;
419 
420     /* TODO: Use g_hash_table_steal_extended() instead? */
421     fidp = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
422     if (fidp) {
423         g_hash_table_remove(s->fids, GINT_TO_POINTER(fid));
424         fidp->clunked = true;
425         return fidp;
426     }
427     return NULL;
428 }
429 
430 void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
431 {
432     int reclaim_count = 0;
433     V9fsState *s = pdu->s;
434     V9fsFidState *f;
435     GHashTableIter iter;
436     gpointer fid;
437     int err;
438     int nclosed = 0;
439 
440     /* prevent multiple coroutines running this function simultaneously */
441     if (s->reclaiming) {
442         return;
443     }
444     s->reclaiming = true;
445 
446     g_hash_table_iter_init(&iter, s->fids);
447 
448     QSLIST_HEAD(, V9fsFidState) reclaim_list =
449         QSLIST_HEAD_INITIALIZER(reclaim_list);
450 
451     /* Pick FIDs to be closed, collect them on reclaim_list. */
452     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) {
453         /*
454          * Unlinked fids cannot be reclaimed, skip those, and also skip fids
455          * currently being operated on.
456          */
457         if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
458             continue;
459         }
460         /*
461          * if it is a recently referenced fid
462          * we leave the fid untouched and clear the
463          * reference bit. We come back to it later
464          * in the next iteration. (a simple LRU without
465          * moving list elements around)
466          */
467         if (f->flags & FID_REFERENCED) {
468             f->flags &= ~FID_REFERENCED;
469             continue;
470         }
471         /*
472          * Add fids to reclaim list.
473          */
474         if (f->fid_type == P9_FID_FILE) {
475             if (f->fs.fd != -1) {
476                 /*
477                  * Up the reference count so that
478                  * a clunk request won't free this fid
479                  */
480                 f->ref++;
481                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
482                 f->fs_reclaim.fd = f->fs.fd;
483                 f->fs.fd = -1;
484                 reclaim_count++;
485             }
486         } else if (f->fid_type == P9_FID_DIR) {
487             if (f->fs.dir.stream != NULL) {
488                 /*
489                  * Up the reference count so that
490                  * a clunk request won't free this fid
491                  */
492                 f->ref++;
493                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
494                 f->fs_reclaim.dir.stream = f->fs.dir.stream;
495                 f->fs.dir.stream = NULL;
496                 reclaim_count++;
497             }
498         }
499         if (reclaim_count >= open_fd_rc) {
500             break;
501         }
502     }
503     /*
504      * Close the picked FIDs altogether on a background I/O driver thread. Do
505      * this all at once to keep latency (i.e. amount of thread hops between main
506      * thread <-> fs driver background thread) as low as possible.
507      */
508     v9fs_co_run_in_worker({
509         QSLIST_FOREACH(f, &reclaim_list, reclaim_next) {
510             err = (f->fid_type == P9_FID_DIR) ?
511                 s->ops->closedir(&s->ctx, &f->fs_reclaim) :
512                 s->ops->close(&s->ctx, &f->fs_reclaim);
513             if (!err) {
514                 /* total_open_fd must only be mutated on main thread */
515                 nclosed++;
516             }
517         }
518     });
519     total_open_fd -= nclosed;
520     /* Free the closed FIDs. */
521     while (!QSLIST_EMPTY(&reclaim_list)) {
522         f = QSLIST_FIRST(&reclaim_list);
523         QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next);
524         /*
525          * Now drop the fid reference, free it
526          * if clunked.
527          */
528         put_fid(pdu, f);
529     }
530 
531     s->reclaiming = false;
532 }
533 
534 /*
535  * This is used when a path is removed from the directory tree. Any
536  * fids that still reference it must not be closed from then on, since
537  * they cannot be reopened.
538  */
539 static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
540 {
541     int err = 0;
542     V9fsState *s = pdu->s;
543     V9fsFidState *fidp;
544     gpointer fid;
545     GHashTableIter iter;
546     /*
547      * The most common case is probably that we have exactly one
548      * fid for the given path, so preallocate exactly one.
549      */
550     g_autoptr(GArray) to_reopen = g_array_sized_new(FALSE, FALSE,
551             sizeof(V9fsFidState *), 1);
552     gint i;
553 
554     g_hash_table_iter_init(&iter, s->fids);
555 
556     /*
557      * We iterate over the fid table looking for the entries we need
558      * to reopen, and store them in to_reopen. This is because
559      * v9fs_reopen_fid() and put_fid() yield. This allows the fid table
560      * to be modified in the meantime, invalidating our iterator.
561      */
562     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &fidp)) {
563         if (fidp->path.size == path->size &&
564             !memcmp(fidp->path.data, path->data, path->size)) {
565             /*
566              * Ensure the fid survives a potential clunk request during
567              * v9fs_reopen_fid or put_fid.
568              */
569             fidp->ref++;
570             fidp->flags |= FID_NON_RECLAIMABLE;
571             g_array_append_val(to_reopen, fidp);
572         }
573     }
574 
575     for (i = 0; i < to_reopen->len; i++) {
576         fidp = g_array_index(to_reopen, V9fsFidState*, i);
577         /* reopen the file/dir if already closed */
578         err = v9fs_reopen_fid(pdu, fidp);
579         if (err < 0) {
580             break;
581         }
582     }
583 
584     for (i = 0; i < to_reopen->len; i++) {
585         put_fid(pdu, g_array_index(to_reopen, V9fsFidState*, i));
586     }
587     return err;
588 }
589 
590 static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
591 {
592     V9fsState *s = pdu->s;
593     V9fsFidState *fidp;
594     GList *freeing;
595     /*
596      * Get a list of all the values (fid states) in the table, which
597      * we then...
598      */
599     g_autoptr(GList) fids = g_hash_table_get_values(s->fids);
600 
601     /* ... remove from the table, taking over ownership. */
602     g_hash_table_steal_all(s->fids);
603 
604     /*
605      * This allows us to release our references to them asynchronously without
606      * iterating over the hash table and risking iterator invalidation
607      * through concurrent modifications.
608      */
609     for (freeing = fids; freeing; freeing = freeing->next) {
610         fidp = freeing->data;
611         fidp->ref++;
612         fidp->clunked = true;
613         put_fid(pdu, fidp);
614     }
615 }
616 
617 #define P9_QID_TYPE_DIR         0x80
618 #define P9_QID_TYPE_SYMLINK     0x02
619 
620 #define P9_STAT_MODE_DIR        0x80000000
621 #define P9_STAT_MODE_APPEND     0x40000000
622 #define P9_STAT_MODE_EXCL       0x20000000
623 #define P9_STAT_MODE_MOUNT      0x10000000
624 #define P9_STAT_MODE_AUTH       0x08000000
625 #define P9_STAT_MODE_TMP        0x04000000
626 #define P9_STAT_MODE_SYMLINK    0x02000000
627 #define P9_STAT_MODE_LINK       0x01000000
628 #define P9_STAT_MODE_DEVICE     0x00800000
629 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
630 #define P9_STAT_MODE_SOCKET     0x00100000
631 #define P9_STAT_MODE_SETUID     0x00080000
632 #define P9_STAT_MODE_SETGID     0x00040000
633 #define P9_STAT_MODE_SETVTX     0x00010000
634 
635 #define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR |          \
636                                 P9_STAT_MODE_SYMLINK |      \
637                                 P9_STAT_MODE_LINK |         \
638                                 P9_STAT_MODE_DEVICE |       \
639                                 P9_STAT_MODE_NAMED_PIPE |   \
640                                 P9_STAT_MODE_SOCKET)
641 
642 /* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
643 static inline uint8_t mirror8bit(uint8_t byte)
644 {
645     return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
646 }
647 
648 /* Same as mirror8bit() just for a 64 bit data type instead for a byte. */
649 static inline uint64_t mirror64bit(uint64_t value)
650 {
651     return ((uint64_t)mirror8bit(value         & 0xff) << 56) |
652            ((uint64_t)mirror8bit((value >> 8)  & 0xff) << 48) |
653            ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) |
654            ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) |
655            ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) |
656            ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) |
657            ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8)  |
658            ((uint64_t)mirror8bit((value >> 56) & 0xff));
659 }
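
/*
 * Worked example (illustrative only): mirror8bit(0xA0) == 0x05 (binary
 * 10100000 -> 00000101), and consequently
 * mirror64bit(0x00000000000000A0) == 0x0500000000000000, i.e. the whole
 * 64-bit value is reversed bit-wise.
 */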
660 
661 /*
662  * Parameter k for the Exponential Golomb algorithm to be used.
663  *
664  * The smaller this value, the smaller the minimum bit count of the Exp.
665  * Golomb generated affixes will be (at the lowest index), at the price of a
666  * higher maximum bit count of generated affixes (at the highest index).
667  * Likewise, increasing this parameter yields a smaller maximum bit count at
668  * the price of a higher minimum bit count.
669  *
670  * In practice that means: a good value for k depends on the expected number
671  * of devices to be exposed by one export. For a small number of devices k
672  * should be small, for a large number of devices k might be increased
673  * instead. The default of k=0 should be fine for most users though.
674  *
675  * IMPORTANT: In case this ever becomes a runtime parameter, the value of
676  * k must not change as long as the guest is still running, because that
677  * would cause completely different inode numbers to be generated on the guest.
678  */
679 #define EXP_GOLOMB_K    0
680 
681 /**
682  * expGolombEncode() - Exponential Golomb algorithm for arbitrary k
683  *                     (including k=0).
684  *
685  * @n: natural number (or index) of the prefix to be generated
686  *     (1, 2, 3, ...)
687  * @k: parameter k of Exp. Golomb algorithm to be used
688  *     (see comment on EXP_GOLOMB_K macro for details about k)
689  * Return: prefix for given @n and @k
690  *
691  * The Exponential Golomb algorithm generates prefixes (NOT suffixes!)
692  * with growing length and with the mathematical property of being
693  * "prefix-free". The latter means the generated prefixes can be prepended
694  * in front of arbitrary numbers and the resulting concatenated numbers are
695  * guaranteed to be always unique.
696  *
697  * This is a minor adjustment to the original Exp. Golomb algorithm in the
698  * sense that lowest allowed index (@n) starts with 1, not with zero.
699  */
700 static VariLenAffix expGolombEncode(uint64_t n, int k)
701 {
702     const uint64_t value = n + (1 << k) - 1;
703     const int bits = (int) log2(value) + 1;
704     return (VariLenAffix) {
705         .type = AffixType_Prefix,
706         .value = value,
707         .bits = bits + MAX((bits - 1 - k), 0)
708     };
709 }
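
/*
 * Worked example (illustrative only, with k = 0): the first indices yield
 *
 *     n = 1  ->  value = 1, bits = 1   (prefix "1")
 *     n = 2  ->  value = 2, bits = 3   (prefix "010")
 *     n = 3  ->  value = 3, bits = 3   (prefix "011")
 *     n = 4  ->  value = 4, bits = 5   (prefix "00100")
 *
 * i.e. the classic Exp. Golomb code words, just starting at index 1.
 */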
710 
711 /**
712  * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix.
713  * @affix: either suffix or prefix to be inverted
714  * Return: inversion of passed @affix
715  *
716  * Simply mirror all bits of the affix value, for the purpose to preserve
717  * respectively the mathematical "prefix-free" or "suffix-free" property
718  * after the conversion.
719  *
720  * If a passed prefix is suitable to create unique numbers, then the
721  * returned suffix is suitable to create unique numbers as well (and vice
722  * versa).
723  */
724 static VariLenAffix invertAffix(const VariLenAffix *affix)
725 {
726     return (VariLenAffix) {
727         .type =
728             (affix->type == AffixType_Suffix) ?
729                 AffixType_Prefix : AffixType_Suffix,
730         .value =
731             mirror64bit(affix->value) >>
732             ((sizeof(affix->value) * 8) - affix->bits),
733         .bits = affix->bits
734     };
735 }
736 
737 /**
738  * affixForIndex() - Generates suffix numbers with "suffix-free" property.
739  * @index: natural number (or index) of the suffix to be generated
740  *         (1, 2, 3, ...)
741  * Return: Suffix suitable to assemble unique number.
742  *
743  * This is just a wrapper function on top of the Exp. Golomb algorithm.
744  *
745  * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes,
746  * this function converts the Exp. Golomb prefixes into appropriate suffixes
747  * which are still suitable for generating unique numbers.
748  */
749 static VariLenAffix affixForIndex(uint64_t index)
750 {
751     VariLenAffix prefix;
752     prefix = expGolombEncode(index, EXP_GOLOMB_K);
753     return invertAffix(&prefix); /* convert prefix to suffix */
754 }
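
/*
 * Worked example (illustrative only): affixForIndex(1) yields the 1-bit
 * suffix "1", affixForIndex(2) the 3-bit suffix "010", and affixForIndex(3)
 * the 3-bit suffix "110", i.e. the Exp. Golomb prefixes from above with
 * their bit order reversed so that they can be appended at the low end of a
 * number while staying "suffix-free".
 */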
755 
756 static uint32_t qpp_hash(QppEntry e)
757 {
758     return qemu_xxhash4(e.ino_prefix, e.dev);
759 }
760 
761 static uint32_t qpf_hash(QpfEntry e)
762 {
763     return qemu_xxhash4(e.ino, e.dev);
764 }
765 
766 static bool qpd_cmp_func(const void *obj, const void *userp)
767 {
768     const QpdEntry *e1 = obj, *e2 = userp;
769     return e1->dev == e2->dev;
770 }
771 
772 static bool qpp_cmp_func(const void *obj, const void *userp)
773 {
774     const QppEntry *e1 = obj, *e2 = userp;
775     return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
776 }
777 
778 static bool qpf_cmp_func(const void *obj, const void *userp)
779 {
780     const QpfEntry *e1 = obj, *e2 = userp;
781     return e1->dev == e2->dev && e1->ino == e2->ino;
782 }
783 
784 static void qp_table_remove(void *p, uint32_t h, void *up)
785 {
786     g_free(p);
787 }
788 
789 static void qp_table_destroy(struct qht *ht)
790 {
791     if (!ht || !ht->map) {
792         return;
793     }
794     qht_iter(ht, qp_table_remove, NULL);
795     qht_destroy(ht);
796 }
797 
798 static void qpd_table_init(struct qht *ht)
799 {
800     qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
801 }
802 
803 static void qpp_table_init(struct qht *ht)
804 {
805     qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
806 }
807 
808 static void qpf_table_init(struct qht *ht)
809 {
810     qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
811 }
812 
813 /*
814  * Returns how many (high end) bits of inode numbers of the passed fs
815  * device shall be used (in combination with the device number) to
816  * generate hash values for qpp_table entries.
817  *
818  * This function is required if variable length suffixes are used for inode
819  * number mapping on guest level. Since a device may end up having multiple
820  * entries in qpp_table, each entry most probably with a different suffix
821  * length, we thus need this function in conjunction with qpd_table to
822  * "agree" about a fixed amount of bits (per device) to always be used for
823  * generating hash values for the purpose of accessing qpp_table, in order
824  * to get consistent behaviour when accessing qpp_table.
825  */
826 static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev)
827 {
828     QpdEntry lookup = {
829         .dev = dev
830     }, *val;
831     uint32_t hash = dev;
832     VariLenAffix affix;
833 
834     val = qht_lookup(&pdu->s->qpd_table, &lookup, hash);
835     if (!val) {
836         val = g_new0(QpdEntry, 1);
837         *val = lookup;
838         affix = affixForIndex(pdu->s->qp_affix_next);
839         val->prefix_bits = affix.bits;
840         qht_insert(&pdu->s->qpd_table, val, hash, NULL);
841         pdu->s->qp_ndevices++;
842     }
843     return val->prefix_bits;
844 }
845 
846 /*
847  * Slow / full mapping host inode nr -> guest inode nr.
848  *
849  * This function performs a slower and much more costly remapping of an
850  * original file inode number on host to an appropriate different inode
851  * number on guest. For every (dev, inode) combination on host a new
852  * sequential number is generated, cached and exposed as inode number on
853  * guest.
854  *
855  * This is just a "last resort" fallback solution if the much faster/cheaper
856  * qid_path_suffixmap() failed. In practice this slow / full mapping is not
857  * expected ever to be used at all though.
858  *
859  * See qid_path_suffixmap() for details
860  *
861  */
862 static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
863                             uint64_t *path)
864 {
865     QpfEntry lookup = {
866         .dev = stbuf->st_dev,
867         .ino = stbuf->st_ino
868     }, *val;
869     uint32_t hash = qpf_hash(lookup);
870     VariLenAffix affix;
871 
872     val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
873 
874     if (!val) {
875         if (pdu->s->qp_fullpath_next == 0) {
876             /* no more files can be mapped :'( */
877             error_report_once(
878                 "9p: No more prefixes available for remapping inodes from "
879                 "host to guest."
880             );
881             return -ENFILE;
882         }
883 
884         val = g_new0(QpfEntry, 1);
885         *val = lookup;
886 
887         /* new unique inode and device combo */
888         affix = affixForIndex(
889             1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
890         );
891         val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
892         pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
893         qht_insert(&pdu->s->qpf_table, val, hash, NULL);
894     }
895 
896     *path = val->path;
897     return 0;
898 }
899 
900 /*
901  * Quick mapping host inode nr -> guest inode nr.
902  *
903  * This function performs quick remapping of an original file inode number
904  * on host to an appropriate different inode number on guest. This remapping
905  * of inodes is required to avoid inode nr collisions on guest which would
906  * happen if the 9p export contains more than 1 exported file system (or
907  * more than 1 file system data set), because unlike on host level where the
908  * files would have different device nrs, all files exported by 9p would
909  * share the same device nr on guest (the device nr of the virtual 9p device
910  * that is).
911  *
912  * Inode remapping is performed by chopping off high end bits of the original
913  * inode number from host, shifting the result upwards and then assigning a
914  * generated suffix number for the low end bits, where the same suffix number
915  * will be shared by all inodes with the same device id AND the same high end
916  * bits that have been chopped off. That approach utilizes the fact that inode
917  * numbers very likely share the same high end bits (i.e. due to their common
918  * sequential generation by file systems) and hence we only have to generate
919  * and track a very limited amount of suffixes in practice due to that.
920  *
921  * We generate variable size suffixes for that purpose. The 1st generated
922  * suffix will only have 1 bit and hence we only need to chop off 1 bit from
923  * the original inode number. The subsequent suffixes being generated will
924  * grow in (bit) size subsequently, i.e. the 2nd and 3rd suffix being
925  * generated will have 3 bits and hence we have to chop off 3 bits from their
926  * original inodes, and so on. That approach of using variable length suffixes
927  * (i.e. over fixed size ones) utilizes the fact that in practice only a very
928  * limited amount of devices are shared by the same export (e.g. typically
929  * less than 2 dozen devices per 9p export), so in practice we need to chop
930  * off less bits than with fixed size prefixes and yet are flexible to add
931  * new devices at runtime below host's export directory at any time without
932  * having to reboot guest nor requiring to reconfigure guest for that. And due
933  * to the very limited amount of original high end bits that we chop off that
934  * way, the total amount of suffixes we need to generate is less than by using
935  * fixed size prefixes and hence it also improves performance of the inode
936  * remapping algorithm, and finally has the nice side effect that the inode
937  * numbers on guest will be much smaller & human friendly. ;-)
938  */
939 static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf,
940                               uint64_t *path)
941 {
942     const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev);
943     QppEntry lookup = {
944         .dev = stbuf->st_dev,
945         .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits))
946     }, *val;
947     uint32_t hash = qpp_hash(lookup);
948 
949     val = qht_lookup(&pdu->s->qpp_table, &lookup, hash);
950 
951     if (!val) {
952         if (pdu->s->qp_affix_next == 0) {
953             /* we ran out of affixes */
954             warn_report_once(
955                 "9p: Potential degraded performance of inode remapping"
956             );
957             return -ENFILE;
958         }
959 
960         val = g_new0(QppEntry, 1);
961         *val = lookup;
962 
963         /* new unique inode affix and device combo */
964         val->qp_affix_index = pdu->s->qp_affix_next++;
965         val->qp_affix = affixForIndex(val->qp_affix_index);
966         qht_insert(&pdu->s->qpp_table, val, hash, NULL);
967     }
968     /* assuming generated affix to be suffix type, not prefix */
969     *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;
970     return 0;
971 }
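
/*
 * Worked example (illustrative only, assuming this is the first device seen
 * and its entry therefore received the 1-bit affix for index 1): a host
 * inode number 0x2a on that device would be remapped to the guest inode
 * number (0x2a << 1) | 0x1 == 0x55, and every further inode sharing the
 * same device and the same chopped-off high bits gets the same 1-bit
 * suffix appended.
 */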
972 
973 static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
974 {
975     int err;
976     size_t size;
977 
978     if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
979         /* map inode+device to qid path (fast path) */
980         err = qid_path_suffixmap(pdu, stbuf, &qidp->path);
981         if (err == -ENFILE) {
982             /* fast path didn't work, fall back to full map */
983             err = qid_path_fullmap(pdu, stbuf, &qidp->path);
984         }
985         if (err) {
986             return err;
987         }
988     } else {
989         if (pdu->s->dev_id != stbuf->st_dev) {
990             if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) {
991                 error_report_once(
992                     "9p: Multiple devices detected in same VirtFS export. "
993                     "Access of guest to additional devices is (partly) "
994                     "denied due to virtfs option 'multidevs=forbid' being "
995                     "effective."
996                 );
997                 return -ENODEV;
998             } else {
999                 warn_report_once(
1000                     "9p: Multiple devices detected in same VirtFS export, "
1001                     "which might lead to file ID collisions and severe "
1002                     "misbehaviours on guest! You should either use a "
1003                     "separate export for each device shared from host or "
1004                     "use virtfs option 'multidevs=remap'!"
1005                 );
1006             }
1007         }
1008         memset(&qidp->path, 0, sizeof(qidp->path));
1009         size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path));
1010         memcpy(&qidp->path, &stbuf->st_ino, size);
1011     }
1012 
1013     qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8);
1014     qidp->type = 0;
1015     if (S_ISDIR(stbuf->st_mode)) {
1016         qidp->type |= P9_QID_TYPE_DIR;
1017     }
1018     if (S_ISLNK(stbuf->st_mode)) {
1019         qidp->type |= P9_QID_TYPE_SYMLINK;
1020     }
1021 
1022     return 0;
1023 }
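
/*
 * Worked example (illustrative only): without 'multidevs=remap', a directory
 * on the export's root device with st_ino == 42, st_mtime == 1000 and
 * st_size == 3 is reported to the client as
 * qid = { .type = P9_QID_TYPE_DIR, .version = 1000 ^ (3 << 8), .path = 42 }.
 */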
1024 
1025 V9fsPDU *pdu_alloc(V9fsState *s)
1026 {
1027     V9fsPDU *pdu = NULL;
1028 
1029     if (!QLIST_EMPTY(&s->free_list)) {
1030         pdu = QLIST_FIRST(&s->free_list);
1031         QLIST_REMOVE(pdu, next);
1032         QLIST_INSERT_HEAD(&s->active_list, pdu, next);
1033     }
1034     return pdu;
1035 }
1036 
1037 void pdu_free(V9fsPDU *pdu)
1038 {
1039     V9fsState *s = pdu->s;
1040 
1041     g_assert(!pdu->cancelled);
1042     QLIST_REMOVE(pdu, next);
1043     QLIST_INSERT_HEAD(&s->free_list, pdu, next);
1044 }
1045 
1046 static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
1047 {
1048     int8_t id = pdu->id + 1; /* Response */
1049     V9fsState *s = pdu->s;
1050     int ret;
1051 
1052     /*
1053      * The 9p spec requires that successfully cancelled pdus receive no reply.
1054      * Sending a reply would confuse clients because they would
1055      * assume that any EINTR is the actual result of the operation,
1056      * rather than a consequence of the cancellation. However, if
1057      * the operation completed (successfully or with an error other
1058      * than one caused by cancellation), we do send out that reply, both
1059      * for efficiency and to avoid confusing the rest of the state machine
1060      * that assumes passing a non-error here will mean a successful
1061      * transmission of the reply.
1062      */
1063     bool discard = pdu->cancelled && len == -EINTR;
1064     if (discard) {
1065         trace_v9fs_rcancel(pdu->tag, pdu->id);
1066         pdu->size = 0;
1067         goto out_notify;
1068     }
1069 
1070     if (len < 0) {
1071         int err = -len;
1072         len = 7;
1073 
1074         if (s->proto_version != V9FS_PROTO_2000L) {
1075             V9fsString str;
1076 
1077             str.data = strerror(err);
1078             str.size = strlen(str.data);
1079 
1080             ret = pdu_marshal(pdu, len, "s", &str);
1081             if (ret < 0) {
1082                 goto out_notify;
1083             }
1084             len += ret;
1085             id = P9_RERROR;
1086         } else {
1087             err = errno_to_dotl(err);
1088         }
1089 
1090         ret = pdu_marshal(pdu, len, "d", err);
1091         if (ret < 0) {
1092             goto out_notify;
1093         }
1094         len += ret;
1095 
1096         if (s->proto_version == V9FS_PROTO_2000L) {
1097             id = P9_RLERROR;
1098         }
1099         trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
1100     }
1101 
1102     /* fill out the header */
1103     if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) {
1104         goto out_notify;
1105     }
1106 
1107     /* keep these in sync */
1108     pdu->size = len;
1109     pdu->id = id;
1110 
1111 out_notify:
1112     pdu->s->transport->push_and_notify(pdu);
1113 
1114     /* Now wakeup anybody waiting in flush for this request */
1115     if (!qemu_co_queue_next(&pdu->complete)) {
1116         pdu_free(pdu);
1117     }
1118 }
1119 
1120 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
1121 {
1122     mode_t ret;
1123 
1124     ret = mode & 0777;
1125     if (mode & P9_STAT_MODE_DIR) {
1126         ret |= S_IFDIR;
1127     }
1128 
1129     if (mode & P9_STAT_MODE_SYMLINK) {
1130         ret |= S_IFLNK;
1131     }
1132     if (mode & P9_STAT_MODE_SOCKET) {
1133         ret |= S_IFSOCK;
1134     }
1135     if (mode & P9_STAT_MODE_NAMED_PIPE) {
1136         ret |= S_IFIFO;
1137     }
1138     if (mode & P9_STAT_MODE_DEVICE) {
1139         if (extension->size && extension->data[0] == 'c') {
1140             ret |= S_IFCHR;
1141         } else {
1142             ret |= S_IFBLK;
1143         }
1144     }
1145 
1146     if (!(ret & ~0777)) {
1147         ret |= S_IFREG;
1148     }
1149 
1150     if (mode & P9_STAT_MODE_SETUID) {
1151         ret |= S_ISUID;
1152     }
1153     if (mode & P9_STAT_MODE_SETGID) {
1154         ret |= S_ISGID;
1155     }
1156     if (mode & P9_STAT_MODE_SETVTX) {
1157         ret |= S_ISVTX;
1158     }
1159 
1160     return ret;
1161 }
1162 
1163 static int donttouch_stat(V9fsStat *stat)
1164 {
1165     if (stat->type == -1 &&
1166         stat->dev == -1 &&
1167         stat->qid.type == 0xff &&
1168         stat->qid.version == (uint32_t) -1 &&
1169         stat->qid.path == (uint64_t) -1 &&
1170         stat->mode == -1 &&
1171         stat->atime == -1 &&
1172         stat->mtime == -1 &&
1173         stat->length == -1 &&
1174         !stat->name.size &&
1175         !stat->uid.size &&
1176         !stat->gid.size &&
1177         !stat->muid.size &&
1178         stat->n_uid == -1 &&
1179         stat->n_gid == -1 &&
1180         stat->n_muid == -1) {
1181         return 1;
1182     }
1183 
1184     return 0;
1185 }
1186 
1187 static void v9fs_stat_init(V9fsStat *stat)
1188 {
1189     v9fs_string_init(&stat->name);
1190     v9fs_string_init(&stat->uid);
1191     v9fs_string_init(&stat->gid);
1192     v9fs_string_init(&stat->muid);
1193     v9fs_string_init(&stat->extension);
1194 }
1195 
1196 static void v9fs_stat_free(V9fsStat *stat)
1197 {
1198     v9fs_string_free(&stat->name);
1199     v9fs_string_free(&stat->uid);
1200     v9fs_string_free(&stat->gid);
1201     v9fs_string_free(&stat->muid);
1202     v9fs_string_free(&stat->extension);
1203 }
1204 
1205 static uint32_t stat_to_v9mode(const struct stat *stbuf)
1206 {
1207     uint32_t mode;
1208 
1209     mode = stbuf->st_mode & 0777;
1210     if (S_ISDIR(stbuf->st_mode)) {
1211         mode |= P9_STAT_MODE_DIR;
1212     }
1213 
1214     if (S_ISLNK(stbuf->st_mode)) {
1215         mode |= P9_STAT_MODE_SYMLINK;
1216     }
1217 
1218     if (S_ISSOCK(stbuf->st_mode)) {
1219         mode |= P9_STAT_MODE_SOCKET;
1220     }
1221 
1222     if (S_ISFIFO(stbuf->st_mode)) {
1223         mode |= P9_STAT_MODE_NAMED_PIPE;
1224     }
1225 
1226     if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
1227         mode |= P9_STAT_MODE_DEVICE;
1228     }
1229 
1230     if (stbuf->st_mode & S_ISUID) {
1231         mode |= P9_STAT_MODE_SETUID;
1232     }
1233 
1234     if (stbuf->st_mode & S_ISGID) {
1235         mode |= P9_STAT_MODE_SETGID;
1236     }
1237 
1238     if (stbuf->st_mode & S_ISVTX) {
1239         mode |= P9_STAT_MODE_SETVTX;
1240     }
1241 
1242     return mode;
1243 }
1244 
1245 static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path,
1246                                        const char *basename,
1247                                        const struct stat *stbuf,
1248                                        V9fsStat *v9stat)
1249 {
1250     int err;
1251 
1252     memset(v9stat, 0, sizeof(*v9stat));
1253 
1254     err = stat_to_qid(pdu, stbuf, &v9stat->qid);
1255     if (err < 0) {
1256         return err;
1257     }
1258     v9stat->mode = stat_to_v9mode(stbuf);
1259     v9stat->atime = stbuf->st_atime;
1260     v9stat->mtime = stbuf->st_mtime;
1261     v9stat->length = stbuf->st_size;
1262 
1263     v9fs_string_free(&v9stat->uid);
1264     v9fs_string_free(&v9stat->gid);
1265     v9fs_string_free(&v9stat->muid);
1266 
1267     v9stat->n_uid = stbuf->st_uid;
1268     v9stat->n_gid = stbuf->st_gid;
1269     v9stat->n_muid = 0;
1270 
1271     v9fs_string_free(&v9stat->extension);
1272 
1273     if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
1274         err = v9fs_co_readlink(pdu, path, &v9stat->extension);
1275         if (err < 0) {
1276             return err;
1277         }
1278     } else if (v9stat->mode & P9_STAT_MODE_DEVICE) {
1279         v9fs_string_sprintf(&v9stat->extension, "%c %u %u",
1280                 S_ISCHR(stbuf->st_mode) ? 'c' : 'b',
1281                 major(stbuf->st_rdev), minor(stbuf->st_rdev));
1282     } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) {
1283         v9fs_string_sprintf(&v9stat->extension, "%s %lu",
1284                 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink);
1285     }
1286 
1287     v9fs_string_sprintf(&v9stat->name, "%s", basename);
1288 
1289     v9stat->size = 61 +
1290         v9fs_string_size(&v9stat->name) +
1291         v9fs_string_size(&v9stat->uid) +
1292         v9fs_string_size(&v9stat->gid) +
1293         v9fs_string_size(&v9stat->muid) +
1294         v9fs_string_size(&v9stat->extension);
1295     return 0;
1296 }
1297 
1298 #define P9_STATS_MODE          0x00000001ULL
1299 #define P9_STATS_NLINK         0x00000002ULL
1300 #define P9_STATS_UID           0x00000004ULL
1301 #define P9_STATS_GID           0x00000008ULL
1302 #define P9_STATS_RDEV          0x00000010ULL
1303 #define P9_STATS_ATIME         0x00000020ULL
1304 #define P9_STATS_MTIME         0x00000040ULL
1305 #define P9_STATS_CTIME         0x00000080ULL
1306 #define P9_STATS_INO           0x00000100ULL
1307 #define P9_STATS_SIZE          0x00000200ULL
1308 #define P9_STATS_BLOCKS        0x00000400ULL
1309 
1310 #define P9_STATS_BTIME         0x00000800ULL
1311 #define P9_STATS_GEN           0x00001000ULL
1312 #define P9_STATS_DATA_VERSION  0x00002000ULL
1313 
1314 #define P9_STATS_BASIC         0x000007ffULL /* Mask for fields up to BLOCKS */
1315 #define P9_STATS_ALL           0x00003fffULL /* Mask for All fields above */
1316 
1317 
1318 /**
1319  * blksize_to_iounit() - Block size exposed to 9p client.
1320  * Return: block size
1321  *
1322  * @pdu: 9p client request
1323  * @blksize: host filesystem's block size
1324  *
1325  * Convert host filesystem's block size into an appropriate block size for
1326  * 9p client (guest OS side). The value returned suggests an "optimum" block
1327  * size for 9p I/O, i.e. to maximize performance.
1328  */
1329 static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize)
1330 {
1331     int32_t iounit = 0;
1332     V9fsState *s = pdu->s;
1333 
1334     /*
1335      * iounit should be a multiple of blksize (host filesystem block size)
1336      * as well as less than (client msize - P9_IOHDRSZ)
1337      */
1338     if (blksize) {
1339         iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize);
1340     }
1341     if (!iounit) {
1342         iounit = s->msize - P9_IOHDRSZ;
1343     }
1344     return iounit;
1345 }
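
/*
 * Worked example (illustrative only, assuming P9_IOHDRSZ is 24 bytes): with
 * a client msize of 128 KiB (131072) and a host block size of 4096, the
 * advertised iounit is QEMU_ALIGN_DOWN(131072 - 24, 4096) == 126976, i.e.
 * 31 full host blocks fitting into one 9p message.
 */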
1346 
1347 static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf)
1348 {
1349     return blksize_to_iounit(pdu, stbuf->st_blksize);
1350 }
1351 
1352 static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf,
1353                                 V9fsStatDotl *v9lstat)
1354 {
1355     memset(v9lstat, 0, sizeof(*v9lstat));
1356 
1357     v9lstat->st_mode = stbuf->st_mode;
1358     v9lstat->st_nlink = stbuf->st_nlink;
1359     v9lstat->st_uid = stbuf->st_uid;
1360     v9lstat->st_gid = stbuf->st_gid;
1361     v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev);
1362     v9lstat->st_size = stbuf->st_size;
1363     v9lstat->st_blksize = stat_to_iounit(pdu, stbuf);
1364     v9lstat->st_blocks = stbuf->st_blocks;
1365     v9lstat->st_atime_sec = stbuf->st_atime;
1366     v9lstat->st_mtime_sec = stbuf->st_mtime;
1367     v9lstat->st_ctime_sec = stbuf->st_ctime;
1368 #ifdef CONFIG_DARWIN
1369     v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec;
1370     v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec;
1371     v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec;
1372 #else
1373     v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
1374     v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
1375     v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
1376 #endif
1377     /* Currently we only support BASIC fields in stat */
1378     v9lstat->st_result_mask = P9_STATS_BASIC;
1379 
1380     return stat_to_qid(pdu, stbuf, &v9lstat->qid);
1381 }
1382 
1383 static void print_sg(struct iovec *sg, int cnt)
1384 {
1385     int i;
1386 
1387     printf("sg[%d]: {", cnt);
1388     for (i = 0; i < cnt; i++) {
1389         if (i) {
1390             printf(", ");
1391         }
1392         printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
1393     }
1394     printf("}\n");
1395 }
1396 
1397 /* Will call this only for path name based fid */
1398 static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
1399 {
1400     V9fsPath str;
1401     v9fs_path_init(&str);
1402     v9fs_path_copy(&str, dst);
1403     v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
1404     v9fs_path_free(&str);
1405 }
1406 
1407 static inline bool is_ro_export(FsContext *ctx)
1408 {
1409     return ctx->export_flags & V9FS_RDONLY;
1410 }
1411 
1412 static void coroutine_fn v9fs_version(void *opaque)
1413 {
1414     ssize_t err;
1415     V9fsPDU *pdu = opaque;
1416     V9fsState *s = pdu->s;
1417     V9fsString version;
1418     size_t offset = 7;
1419 
1420     v9fs_string_init(&version);
1421     err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
1422     if (err < 0) {
1423         goto out;
1424     }
1425     trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
1426 
1427     virtfs_reset(pdu);
1428 
1429     if (!strcmp(version.data, "9P2000.u")) {
1430         s->proto_version = V9FS_PROTO_2000U;
1431     } else if (!strcmp(version.data, "9P2000.L")) {
1432         s->proto_version = V9FS_PROTO_2000L;
1433     } else {
1434         v9fs_string_sprintf(&version, "unknown");
1435         /* skip min. msize check, reporting invalid version has priority */
1436         goto marshal;
1437     }
1438 
1439     if (s->msize < P9_MIN_MSIZE) {
1440         err = -EMSGSIZE;
1441         error_report(
1442             "9pfs: Client requested msize < minimum msize ("
1443             stringify(P9_MIN_MSIZE) ") supported by this server."
1444         );
1445         goto out;
1446     }
1447 
1448     /* 8192 is the default msize of Linux clients */
1449     if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) {
1450         warn_report_once(
1451             "9p: degraded performance: a reasonable high msize should be "
1452             "chosen on client/guest side (chosen msize is <= 8192). See "
1453             "https://wiki.qemu.org/Documentation/9psetup#msize for details."
1454         );
1455     }
1456 
1457 marshal:
1458     err = pdu_marshal(pdu, offset, "ds", s->msize, &version);
1459     if (err < 0) {
1460         goto out;
1461     }
1462     err += offset;
1463     trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data);
1464 out:
1465     pdu_complete(pdu, err);
1466     v9fs_string_free(&version);
1467 }
1468 
1469 static void coroutine_fn v9fs_attach(void *opaque)
1470 {
1471     V9fsPDU *pdu = opaque;
1472     V9fsState *s = pdu->s;
1473     int32_t fid, afid, n_uname;
1474     V9fsString uname, aname;
1475     V9fsFidState *fidp;
1476     size_t offset = 7;
1477     V9fsQID qid;
1478     ssize_t err;
1479     struct stat stbuf;
1480 
1481     v9fs_string_init(&uname);
1482     v9fs_string_init(&aname);
1483     err = pdu_unmarshal(pdu, offset, "ddssd", &fid,
1484                         &afid, &uname, &aname, &n_uname);
1485     if (err < 0) {
1486         goto out_nofid;
1487     }
1488     trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data);
1489 
1490     fidp = alloc_fid(s, fid);
1491     if (fidp == NULL) {
1492         err = -EINVAL;
1493         goto out_nofid;
1494     }
1495     fidp->uid = n_uname;
1496     err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path);
1497     if (err < 0) {
1498         err = -EINVAL;
1499         clunk_fid(s, fid);
1500         goto out;
1501     }
1502     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1503     if (err < 0) {
1504         err = -EINVAL;
1505         clunk_fid(s, fid);
1506         goto out;
1507     }
1508     err = stat_to_qid(pdu, &stbuf, &qid);
1509     if (err < 0) {
1510         err = -EINVAL;
1511         clunk_fid(s, fid);
1512         goto out;
1513     }
1514 
1515     /*
1516      * disable migration if we haven't done already.
1517      * attach could get called multiple times for the same export.
1518      */
1519     if (!s->migration_blocker) {
1520         error_setg(&s->migration_blocker,
1521                    "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
1522                    s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
1523         err = migrate_add_blocker(&s->migration_blocker, NULL);
1524         if (err < 0) {
1525             clunk_fid(s, fid);
1526             goto out;
1527         }
1528         s->root_fid = fid;
1529     }
1530 
1531     err = pdu_marshal(pdu, offset, "Q", &qid);
1532     if (err < 0) {
1533         clunk_fid(s, fid);
1534         goto out;
1535     }
1536     err += offset;
1537 
1538     memcpy(&s->root_st, &stbuf, sizeof(stbuf));
1539     trace_v9fs_attach_return(pdu->tag, pdu->id,
1540                              qid.type, qid.version, qid.path);
1541 out:
1542     put_fid(pdu, fidp);
1543 out_nofid:
1544     pdu_complete(pdu, err);
1545     v9fs_string_free(&uname);
1546     v9fs_string_free(&aname);
1547 }
1548 
1549 static void coroutine_fn v9fs_stat(void *opaque)
1550 {
1551     int32_t fid;
1552     V9fsStat v9stat;
1553     ssize_t err = 0;
1554     size_t offset = 7;
1555     struct stat stbuf;
1556     V9fsFidState *fidp;
1557     V9fsPDU *pdu = opaque;
1558     char *basename;
1559 
1560     err = pdu_unmarshal(pdu, offset, "d", &fid);
1561     if (err < 0) {
1562         goto out_nofid;
1563     }
1564     trace_v9fs_stat(pdu->tag, pdu->id, fid);
1565 
1566     fidp = get_fid(pdu, fid);
1567     if (fidp == NULL) {
1568         err = -ENOENT;
1569         goto out_nofid;
1570     }
1571     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1572     if (err < 0) {
1573         goto out;
1574     }
1575     basename = g_path_get_basename(fidp->path.data);
1576     err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat);
1577     g_free(basename);
1578     if (err < 0) {
1579         goto out;
1580     }
1581     err = pdu_marshal(pdu, offset, "wS", 0, &v9stat);
1582     if (err < 0) {
1583         v9fs_stat_free(&v9stat);
1584         goto out;
1585     }
1586     trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode,
1587                            v9stat.atime, v9stat.mtime, v9stat.length);
1588     err += offset;
1589     v9fs_stat_free(&v9stat);
1590 out:
1591     put_fid(pdu, fidp);
1592 out_nofid:
1593     pdu_complete(pdu, err);
1594 }
1595 
1596 static void coroutine_fn v9fs_getattr(void *opaque)
1597 {
1598     int32_t fid;
1599     size_t offset = 7;
1600     ssize_t retval = 0;
1601     struct stat stbuf;
1602     V9fsFidState *fidp;
1603     uint64_t request_mask;
1604     V9fsStatDotl v9stat_dotl;
1605     V9fsPDU *pdu = opaque;
1606 
1607     retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask);
1608     if (retval < 0) {
1609         goto out_nofid;
1610     }
1611     trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask);
1612 
1613     fidp = get_fid(pdu, fid);
1614     if (fidp == NULL) {
1615         retval = -ENOENT;
1616         goto out_nofid;
1617     }
1618     if ((fidp->fid_type == P9_FID_FILE && fidp->fs.fd != -1) ||
1619         (fidp->fid_type == P9_FID_DIR && fidp->fs.dir.stream))
1620     {
1621         retval = v9fs_co_fstat(pdu, fidp, &stbuf);
1622     } else {
1623         retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1624     }
1625     if (retval < 0) {
1626         goto out;
1627     }
1628     retval = stat_to_v9stat_dotl(pdu, &stbuf, &v9stat_dotl);
1629     if (retval < 0) {
1630         goto out;
1631     }
1632 
1633     /* fill st_gen if requested and supported by the underlying fs */
1634     if (request_mask & P9_STATS_GEN) {
1635         retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
1636         switch (retval) {
1637         case 0:
1638             /* we have valid st_gen: update result mask */
1639             v9stat_dotl.st_result_mask |= P9_STATS_GEN;
1640             break;
1641         case -EINTR:
1642             /* request cancelled, e.g. by Tflush */
1643             goto out;
1644         default:
1645             /* failed to get st_gen: not fatal, ignore */
1646             break;
1647         }
1648     }
1649     retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl);
1650     if (retval < 0) {
1651         goto out;
1652     }
1653     retval += offset;
1654     trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask,
1655                               v9stat_dotl.st_mode, v9stat_dotl.st_uid,
1656                               v9stat_dotl.st_gid);
1657 out:
1658     put_fid(pdu, fidp);
1659 out_nofid:
1660     pdu_complete(pdu, retval);
1661 }
1662 
1663 /* Attribute flags */
1664 #define P9_ATTR_MODE       (1 << 0)
1665 #define P9_ATTR_UID        (1 << 1)
1666 #define P9_ATTR_GID        (1 << 2)
1667 #define P9_ATTR_SIZE       (1 << 3)
1668 #define P9_ATTR_ATIME      (1 << 4)
1669 #define P9_ATTR_MTIME      (1 << 5)
1670 #define P9_ATTR_CTIME      (1 << 6)
1671 #define P9_ATTR_ATIME_SET  (1 << 7)
1672 #define P9_ATTR_MTIME_SET  (1 << 8)
1673 
1674 #define P9_ATTR_MASK    127
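/*
 * Note: P9_ATTR_MASK is 127 (0x7f), i.e. the OR of the first seven flags
 * above (P9_ATTR_MODE through P9_ATTR_CTIME), deliberately excluding the
 * *_SET variants. This is what lets v9fs_setattr() below detect a
 * "ctime-only" request via:
 *
 *   !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME)
 */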
1675 
1676 static void coroutine_fn v9fs_setattr(void *opaque)
1677 {
1678     int err = 0;
1679     int32_t fid;
1680     V9fsFidState *fidp;
1681     size_t offset = 7;
1682     V9fsIattr v9iattr;
1683     V9fsPDU *pdu = opaque;
1684 
1685     err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr);
1686     if (err < 0) {
1687         goto out_nofid;
1688     }
1689 
1690     trace_v9fs_setattr(pdu->tag, pdu->id, fid,
1691                        v9iattr.valid, v9iattr.mode, v9iattr.uid, v9iattr.gid,
1692                        v9iattr.size, v9iattr.atime_sec, v9iattr.mtime_sec);
1693 
1694     fidp = get_fid(pdu, fid);
1695     if (fidp == NULL) {
1696         err = -EINVAL;
1697         goto out_nofid;
1698     }
1699     if (v9iattr.valid & P9_ATTR_MODE) {
1700         err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode);
1701         if (err < 0) {
1702             goto out;
1703         }
1704     }
1705     if (v9iattr.valid & (P9_ATTR_ATIME | P9_ATTR_MTIME)) {
1706         struct timespec times[2];
1707         if (v9iattr.valid & P9_ATTR_ATIME) {
1708             if (v9iattr.valid & P9_ATTR_ATIME_SET) {
1709                 times[0].tv_sec = v9iattr.atime_sec;
1710                 times[0].tv_nsec = v9iattr.atime_nsec;
1711             } else {
1712                 times[0].tv_nsec = UTIME_NOW;
1713             }
1714         } else {
1715             times[0].tv_nsec = UTIME_OMIT;
1716         }
1717         if (v9iattr.valid & P9_ATTR_MTIME) {
1718             if (v9iattr.valid & P9_ATTR_MTIME_SET) {
1719                 times[1].tv_sec = v9iattr.mtime_sec;
1720                 times[1].tv_nsec = v9iattr.mtime_nsec;
1721             } else {
1722                 times[1].tv_nsec = UTIME_NOW;
1723             }
1724         } else {
1725             times[1].tv_nsec = UTIME_OMIT;
1726         }
1727         err = v9fs_co_utimensat(pdu, &fidp->path, times);
1728         if (err < 0) {
1729             goto out;
1730         }
1731     }
1732     /*
1733      * If the only valid entry in iattr is ctime we can call
1734      * chown(-1,-1) to update the ctime of the file
1735      */
1736     if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) ||
1737         ((v9iattr.valid & P9_ATTR_CTIME)
1738          && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) {
1739         if (!(v9iattr.valid & P9_ATTR_UID)) {
1740             v9iattr.uid = -1;
1741         }
1742         if (!(v9iattr.valid & P9_ATTR_GID)) {
1743             v9iattr.gid = -1;
1744         }
1745         err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid,
1746                             v9iattr.gid);
1747         if (err < 0) {
1748             goto out;
1749         }
1750     }
1751     if (v9iattr.valid & (P9_ATTR_SIZE)) {
1752         err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
1753         if (err < 0) {
1754             goto out;
1755         }
1756     }
1757     err = offset;
1758     trace_v9fs_setattr_return(pdu->tag, pdu->id);
1759 out:
1760     put_fid(pdu, fidp);
1761 out_nofid:
1762     pdu_complete(pdu, err);
1763 }
1764 
1765 static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids)
1766 {
1767     int i;
1768     ssize_t err;
1769     size_t offset = 7;
1770 
1771     err = pdu_marshal(pdu, offset, "w", nwnames);
1772     if (err < 0) {
1773         return err;
1774     }
1775     offset += err;
1776     for (i = 0; i < nwnames; i++) {
1777         err = pdu_marshal(pdu, offset, "Q", &qids[i]);
1778         if (err < 0) {
1779             return err;
1780         }
1781         offset += err;
1782     }
1783     return offset;
1784 }
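/*
 * Note: the helper above marshals the Rwalk payload that follows the common
 * 7-byte 9P header (size[4] type[1] tag[2]):
 *
 *   nwqid[2]  nwqid * qid[13]
 *
 * which is why 'offset' starts at 7 and the final offset equals the total
 * response size returned to the caller.
 */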
1785 
1786 static bool name_is_illegal(const char *name)
1787 {
1788     return !*name || strchr(name, '/') != NULL;
1789 }
1790 
1791 static bool same_stat_id(const struct stat *a, const struct stat *b)
1792 {
1793     return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
1794 }
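/*
 * Note: same_stat_id() considers two stat results to denote the same object
 * when both device and inode match. v9fs_walk() below compares against
 * s->root_st so that a ".." path component at the export root is simply
 * ignored rather than walking above the exported tree.
 */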
1795 
1796 /*
1797  * Returns a (newly allocated) comma-separated string representation of the
1798  * passed array, for logging (tracing) purposes of trace event "v9fs_walk".
1799  *
1800  * It is the caller's responsibility to free the returned string.
1801  */
1802 static char *trace_v9fs_walk_wnames(V9fsString *wnames, size_t nwnames)
1803 {
1804     g_autofree char **arr = g_malloc0_n(nwnames + 1, sizeof(char *));
1805     for (size_t i = 0; i < nwnames; ++i) {
1806         arr[i] = wnames[i].data;
1807     }
1808     return g_strjoinv(", ", arr);
1809 }
1810 
1811 static void coroutine_fn v9fs_walk(void *opaque)
1812 {
1813     int name_idx, nwalked;
1814     g_autofree V9fsQID *qids = NULL;
1815     int i, err = 0, any_err = 0;
1816     V9fsPath dpath, path;
1817     P9ARRAY_REF(V9fsPath) pathes = NULL;
1818     uint16_t nwnames;
1819     struct stat stbuf, fidst;
1820     g_autofree struct stat *stbufs = NULL;
1821     size_t offset = 7;
1822     int32_t fid, newfid;
1823     P9ARRAY_REF(V9fsString) wnames = NULL;
1824     g_autofree char *trace_wnames = NULL;
1825     V9fsFidState *fidp;
1826     V9fsFidState *newfidp = NULL;
1827     V9fsPDU *pdu = opaque;
1828     V9fsState *s = pdu->s;
1829     V9fsQID qid;
1830 
1831     err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames);
1832     if (err < 0) {
1833         pdu_complete(pdu, err);
1834         return;
1835     }
1836     offset += err;
1837 
1838     if (nwnames > P9_MAXWELEM) {
1839         err = -EINVAL;
1840         goto out_nofid_nownames;
1841     }
1842     if (nwnames) {
1843         P9ARRAY_NEW(V9fsString, wnames, nwnames);
1844         qids   = g_new0(V9fsQID, nwnames);
1845         stbufs = g_new0(struct stat, nwnames);
1846         P9ARRAY_NEW(V9fsPath, pathes, nwnames);
1847         for (i = 0; i < nwnames; i++) {
1848             err = pdu_unmarshal(pdu, offset, "s", &wnames[i]);
1849             if (err < 0) {
1850                 goto out_nofid_nownames;
1851             }
1852             if (name_is_illegal(wnames[i].data)) {
1853                 err = -ENOENT;
1854                 goto out_nofid_nownames;
1855             }
1856             offset += err;
1857         }
1858         if (trace_event_get_state_backends(TRACE_V9FS_WALK)) {
1859             trace_wnames = trace_v9fs_walk_wnames(wnames, nwnames);
1860             trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames,
1861                             trace_wnames);
1862         }
1863     } else {
1864         trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "");
1865     }
1866 
1867     fidp = get_fid(pdu, fid);
1868     if (fidp == NULL) {
1869         err = -ENOENT;
1870         goto out_nofid;
1871     }
1872 
1873     v9fs_path_init(&dpath);
1874     v9fs_path_init(&path);
1875     /*
1876      * Both dpath and path initially point to fidp's path.
1877      * Needed to handle a request with nwnames == 0.
1878      */
1879     v9fs_path_copy(&dpath, &fidp->path);
1880     v9fs_path_copy(&path, &fidp->path);
1881 
1882     /*
1883      * To keep latency (i.e. overall execution time for processing this
1884      * Twalk client request) as small as possible, run all the required fs
1885      * driver code altogether inside the following block.
1886      */
1887     v9fs_co_run_in_worker({
1888         nwalked = 0;
1889         if (v9fs_request_cancelled(pdu)) {
1890             any_err |= err = -EINTR;
1891             break;
1892         }
1893         err = s->ops->lstat(&s->ctx, &dpath, &fidst);
1894         if (err < 0) {
1895             any_err |= err = -errno;
1896             break;
1897         }
1898         stbuf = fidst;
1899         for (; nwalked < nwnames; nwalked++) {
1900             if (v9fs_request_cancelled(pdu)) {
1901                 any_err |= err = -EINTR;
1902                 break;
1903             }
1904             if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1905                 strcmp("..", wnames[nwalked].data))
1906             {
1907                 err = s->ops->name_to_path(&s->ctx, &dpath,
1908                                            wnames[nwalked].data,
1909                                            &pathes[nwalked]);
1910                 if (err < 0) {
1911                     any_err |= err = -errno;
1912                     break;
1913                 }
1914                 if (v9fs_request_cancelled(pdu)) {
1915                     any_err |= err = -EINTR;
1916                     break;
1917                 }
1918                 err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf);
1919                 if (err < 0) {
1920                     any_err |= err = -errno;
1921                     break;
1922                 }
1923                 stbufs[nwalked] = stbuf;
1924                 v9fs_path_copy(&dpath, &pathes[nwalked]);
1925             }
1926         }
1927     });
1928     /*
1929      * Handle all the rest of this Twalk request on the main thread ...
1930      *
1931      * NOTE: -EINTR is an exception where we deviate from the protocol spec
1932      * and simply send an Rlerror response instead of bothering to assemble
1933      * a (truncated) Rwalk response; -EINTR is always the result of a Tflush
1934      * request, so the client would no longer wait for a response in this
1935      * case anyway.
1936      */
1937     if ((err < 0 && !nwalked) || err == -EINTR) {
1938         goto out;
1939     }
1940 
1941     any_err |= err = stat_to_qid(pdu, &fidst, &qid);
1942     if (err < 0 && !nwalked) {
1943         goto out;
1944     }
1945     stbuf = fidst;
1946 
1947     /* reset dpath and path */
1948     v9fs_path_copy(&dpath, &fidp->path);
1949     v9fs_path_copy(&path, &fidp->path);
1950 
1951     for (name_idx = 0; name_idx < nwalked; name_idx++) {
1952         if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1953             strcmp("..", wnames[name_idx].data))
1954         {
1955             stbuf = stbufs[name_idx];
1956             any_err |= err = stat_to_qid(pdu, &stbuf, &qid);
1957             if (err < 0) {
1958                 break;
1959             }
1960             v9fs_path_copy(&path, &pathes[name_idx]);
1961             v9fs_path_copy(&dpath, &path);
1962         }
1963         memcpy(&qids[name_idx], &qid, sizeof(qid));
1964     }
1965     if (any_err < 0) {
1966         if (!name_idx) {
1967             /* don't send any QIDs, send Rlerror instead */
1968             goto out;
1969         } else {
1970             /* send QIDs (not Rlerror), but fid MUST remain unaffected */
1971             goto send_qids;
1972         }
1973     }
1974     if (fid == newfid) {
1975         if (fidp->fid_type != P9_FID_NONE) {
1976             err = -EINVAL;
1977             goto out;
1978         }
1979         v9fs_path_write_lock(s);
1980         v9fs_path_copy(&fidp->path, &path);
1981         v9fs_path_unlock(s);
1982     } else {
1983         newfidp = alloc_fid(s, newfid);
1984         if (newfidp == NULL) {
1985             err = -EINVAL;
1986             goto out;
1987         }
1988         newfidp->uid = fidp->uid;
1989         v9fs_path_copy(&newfidp->path, &path);
1990     }
1991 send_qids:
1992     err = v9fs_walk_marshal(pdu, name_idx, qids);
1993     trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids);
1994 out:
1995     put_fid(pdu, fidp);
1996     if (newfidp) {
1997         put_fid(pdu, newfidp);
1998     }
1999     v9fs_path_free(&dpath);
2000     v9fs_path_free(&path);
2001     goto out_pdu_complete;
2002 out_nofid_nownames:
2003     trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "<?>");
2004 out_nofid:
2005 out_pdu_complete:
2006     pdu_complete(pdu, err);
2007 }
2008 
2009 static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
2010 {
2011     struct statfs stbuf;
2012     int err = v9fs_co_statfs(pdu, path, &stbuf);
2013 
2014     return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0);
2015 }
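/*
 * Note: the iounit advertised to the client is derived from the host
 * filesystem's preferred block size (statfs f_bsize); if statfs fails, 0 is
 * passed down, leaving it to blksize_to_iounit() to presumably fall back to
 * a sane default.
 */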
2016 
2017 static void coroutine_fn v9fs_open(void *opaque)
2018 {
2019     int flags;
2020     int32_t fid;
2021     int32_t mode;
2022     V9fsQID qid;
2023     int iounit = 0;
2024     ssize_t err = 0;
2025     size_t offset = 7;
2026     struct stat stbuf;
2027     V9fsFidState *fidp;
2028     V9fsPDU *pdu = opaque;
2029     V9fsState *s = pdu->s;
2030     g_autofree char *trace_oflags = NULL;
2031 
2032     if (s->proto_version == V9FS_PROTO_2000L) {
2033         err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
2034     } else {
2035         uint8_t modebyte;
2036         err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte);
2037         mode = modebyte;
2038     }
2039     if (err < 0) {
2040         goto out_nofid;
2041     }
2042     if (trace_event_get_state_backends(TRACE_V9FS_OPEN)) {
2043         trace_oflags = qemu_open_flags_tostr(
2044             (s->proto_version == V9FS_PROTO_2000L) ?
2045                 dotl_to_open_flags(mode) : omode_to_uflags(mode)
2046         );
2047         trace_v9fs_open(pdu->tag, pdu->id, fid, mode, trace_oflags);
2048     }
2049 
2050     fidp = get_fid(pdu, fid);
2051     if (fidp == NULL) {
2052         err = -ENOENT;
2053         goto out_nofid;
2054     }
2055     if (fidp->fid_type != P9_FID_NONE) {
2056         err = -EINVAL;
2057         goto out;
2058     }
2059 
2060     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2061     if (err < 0) {
2062         goto out;
2063     }
2064     err = stat_to_qid(pdu, &stbuf, &qid);
2065     if (err < 0) {
2066         goto out;
2067     }
2068     if (S_ISDIR(stbuf.st_mode)) {
2069         err = v9fs_co_opendir(pdu, fidp);
2070         if (err < 0) {
2071             goto out;
2072         }
2073         fidp->fid_type = P9_FID_DIR;
2074         err = pdu_marshal(pdu, offset, "Qd", &qid, 0);
2075         if (err < 0) {
2076             goto out;
2077         }
2078         err += offset;
2079     } else {
2080         if (s->proto_version == V9FS_PROTO_2000L) {
2081             flags = get_dotl_openflags(s, mode);
2082         } else {
2083             flags = omode_to_uflags(mode);
2084         }
2085         if (is_ro_export(&s->ctx)) {
2086             if (mode & O_WRONLY || mode & O_RDWR ||
2087                 mode & O_APPEND || mode & O_TRUNC) {
2088                 err = -EROFS;
2089                 goto out;
2090             }
2091         }
2092         err = v9fs_co_open(pdu, fidp, flags);
2093         if (err < 0) {
2094             goto out;
2095         }
2096         fidp->fid_type = P9_FID_FILE;
2097         fidp->open_flags = flags;
2098         if (flags & O_EXCL) {
2099             /*
2100              * We let the host file system do the O_EXCL check.
2101              * We should not reclaim such an fd.
2102              */
2103             fidp->flags |= FID_NON_RECLAIMABLE;
2104         }
2105         iounit = get_iounit(pdu, &fidp->path);
2106         err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2107         if (err < 0) {
2108             goto out;
2109         }
2110         err += offset;
2111     }
2112     trace_v9fs_open_return(pdu->tag, pdu->id,
2113                            qid.type, qid.version, qid.path, iounit);
2114 out:
2115     put_fid(pdu, fidp);
2116 out_nofid:
2117     pdu_complete(pdu, err);
2118 }
2119 
2120 static void coroutine_fn v9fs_lcreate(void *opaque)
2121 {
2122     int32_t dfid, flags, mode;
2123     gid_t gid;
2124     ssize_t err = 0;
2125     ssize_t offset = 7;
2126     V9fsString name;
2127     V9fsFidState *fidp;
2128     struct stat stbuf;
2129     V9fsQID qid;
2130     int32_t iounit;
2131     V9fsPDU *pdu = opaque;
2132 
2133     v9fs_string_init(&name);
2134     err = pdu_unmarshal(pdu, offset, "dsddd", &dfid,
2135                         &name, &flags, &mode, &gid);
2136     if (err < 0) {
2137         goto out_nofid;
2138     }
2139     trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
2140 
2141     if (name_is_illegal(name.data)) {
2142         err = -ENOENT;
2143         goto out_nofid;
2144     }
2145 
2146     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2147         err = -EEXIST;
2148         goto out_nofid;
2149     }
2150 
2151     fidp = get_fid(pdu, dfid);
2152     if (fidp == NULL) {
2153         err = -ENOENT;
2154         goto out_nofid;
2155     }
2156     if (fidp->fid_type != P9_FID_NONE) {
2157         err = -EINVAL;
2158         goto out;
2159     }
2160 
2161     flags = get_dotl_openflags(pdu->s, flags);
2162     err = v9fs_co_open2(pdu, fidp, &name, gid,
2163                         flags | O_CREAT, mode, &stbuf);
2164     if (err < 0) {
2165         goto out;
2166     }
2167     fidp->fid_type = P9_FID_FILE;
2168     fidp->open_flags = flags;
2169     if (flags & O_EXCL) {
2170         /*
2171          * We let the host file system do the O_EXCL check.
2172          * We should not reclaim such an fd.
2173          */
2174         fidp->flags |= FID_NON_RECLAIMABLE;
2175     }
2176     iounit = get_iounit(pdu, &fidp->path);
2177     err = stat_to_qid(pdu, &stbuf, &qid);
2178     if (err < 0) {
2179         goto out;
2180     }
2181     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2182     if (err < 0) {
2183         goto out;
2184     }
2185     err += offset;
2186     trace_v9fs_lcreate_return(pdu->tag, pdu->id,
2187                               qid.type, qid.version, qid.path, iounit);
2188 out:
2189     put_fid(pdu, fidp);
2190 out_nofid:
2191     pdu_complete(pdu, err);
2192     v9fs_string_free(&name);
2193 }
2194 
2195 static void coroutine_fn v9fs_fsync(void *opaque)
2196 {
2197     int err;
2198     int32_t fid;
2199     int datasync;
2200     size_t offset = 7;
2201     V9fsFidState *fidp;
2202     V9fsPDU *pdu = opaque;
2203 
2204     err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
2205     if (err < 0) {
2206         goto out_nofid;
2207     }
2208     trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync);
2209 
2210     fidp = get_fid(pdu, fid);
2211     if (fidp == NULL) {
2212         err = -ENOENT;
2213         goto out_nofid;
2214     }
2215     err = v9fs_co_fsync(pdu, fidp, datasync);
2216     if (!err) {
2217         err = offset;
2218     }
2219     put_fid(pdu, fidp);
2220 out_nofid:
2221     pdu_complete(pdu, err);
2222 }
2223 
2224 static void coroutine_fn v9fs_clunk(void *opaque)
2225 {
2226     int err;
2227     int32_t fid;
2228     size_t offset = 7;
2229     V9fsFidState *fidp;
2230     V9fsPDU *pdu = opaque;
2231     V9fsState *s = pdu->s;
2232 
2233     err = pdu_unmarshal(pdu, offset, "d", &fid);
2234     if (err < 0) {
2235         goto out_nofid;
2236     }
2237     trace_v9fs_clunk(pdu->tag, pdu->id, fid);
2238 
2239     fidp = clunk_fid(s, fid);
2240     if (fidp == NULL) {
2241         err = -ENOENT;
2242         goto out_nofid;
2243     }
2244     /*
2245      * Bump the ref so that put_fid will
2246      * free the fid.
2247      */
2248     fidp->ref++;
2249     err = put_fid(pdu, fidp);
2250     if (!err) {
2251         err = offset;
2252     }
2253 out_nofid:
2254     pdu_complete(pdu, err);
2255 }
2256 
2257 /*
2258  * Create a QEMUIOVector for a sub-region of PDU iovecs
2259  *
2260  * @qiov:       uninitialized QEMUIOVector
2261  * @skip:       number of bytes to skip from beginning of PDU
2262  * @size:       number of bytes to include
2263  * @is_write:   true - write, false - read
2264  *
2265  * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
2266  * with qemu_iovec_destroy().
2267  */
2268 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
2269                                     size_t skip, size_t size,
2270                                     bool is_write)
2271 {
2272     QEMUIOVector elem;
2273     struct iovec *iov;
2274     unsigned int niov;
2275 
2276     if (is_write) {
2277         pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
2278     } else {
2279         pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
2280     }
2281 
2282     qemu_iovec_init_external(&elem, iov, niov);
2283     qemu_iovec_init(qiov, niov);
2284     qemu_iovec_concat(qiov, &elem, skip, size);
2285 }
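/*
 * Note: typical usage can be seen in v9fs_read() and v9fs_write() below.
 * For Rread the caller skips offset + 4 bytes (the 7-byte header plus the
 * 4-byte count field) so that file data is transferred straight into the
 * guest-visible buffers:
 *
 *   v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
 */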
2286 
2287 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2288                            uint64_t off, uint32_t max_count)
2289 {
2290     ssize_t err;
2291     size_t offset = 7;
2292     uint64_t read_count;
2293     QEMUIOVector qiov_full;
2294 
2295     if (fidp->fs.xattr.len < off) {
2296         read_count = 0;
2297     } else {
2298         read_count = fidp->fs.xattr.len - off;
2299     }
2300     if (read_count > max_count) {
2301         read_count = max_count;
2302     }
2303     err = pdu_marshal(pdu, offset, "d", read_count);
2304     if (err < 0) {
2305         return err;
2306     }
2307     offset += err;
2308 
2309     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
2310     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
2311                     ((char *)fidp->fs.xattr.value) + off,
2312                     read_count);
2313     qemu_iovec_destroy(&qiov_full);
2314     if (err < 0) {
2315         return err;
2316     }
2317     offset += err;
2318     return offset;
2319 }
2320 
2321 static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
2322                                                   V9fsFidState *fidp,
2323                                                   uint32_t max_count)
2324 {
2325     V9fsPath path;
2326     V9fsStat v9stat;
2327     int len, err = 0;
2328     int32_t count = 0;
2329     struct stat stbuf;
2330     off_t saved_dir_pos;
2331     struct dirent *dent;
2332 
2333     /* save the directory position */
2334     saved_dir_pos = v9fs_co_telldir(pdu, fidp);
2335     if (saved_dir_pos < 0) {
2336         return saved_dir_pos;
2337     }
2338 
2339     while (1) {
2340         v9fs_path_init(&path);
2341 
2342         v9fs_readdir_lock(&fidp->fs.dir);
2343 
2344         err = v9fs_co_readdir(pdu, fidp, &dent);
2345         if (err || !dent) {
2346             break;
2347         }
2348         err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
2349         if (err < 0) {
2350             break;
2351         }
2352         err = v9fs_co_lstat(pdu, &path, &stbuf);
2353         if (err < 0) {
2354             break;
2355         }
2356         err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat);
2357         if (err < 0) {
2358             break;
2359         }
2360         if ((count + v9stat.size + 2) > max_count) {
2361             v9fs_readdir_unlock(&fidp->fs.dir);
2362 
2363             /* Ran out of buffer. Set dir back to old position and return */
2364             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2365             v9fs_stat_free(&v9stat);
2366             v9fs_path_free(&path);
2367             return count;
2368         }
2369 
2370         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2371         len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
2372 
2373         v9fs_readdir_unlock(&fidp->fs.dir);
2374 
2375         if (len < 0) {
2376             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2377             v9fs_stat_free(&v9stat);
2378             v9fs_path_free(&path);
2379             return len;
2380         }
2381         count += len;
2382         v9fs_stat_free(&v9stat);
2383         v9fs_path_free(&path);
2384         saved_dir_pos = qemu_dirent_off(dent);
2385     }
2386 
2387     v9fs_readdir_unlock(&fidp->fs.dir);
2388 
2389     v9fs_path_free(&path);
2390     if (err < 0) {
2391         return err;
2392     }
2393     return count;
2394 }
2395 
2396 static void coroutine_fn v9fs_read(void *opaque)
2397 {
2398     int32_t fid;
2399     uint64_t off;
2400     ssize_t err = 0;
2401     int32_t count = 0;
2402     size_t offset = 7;
2403     uint32_t max_count;
2404     V9fsFidState *fidp;
2405     V9fsPDU *pdu = opaque;
2406     V9fsState *s = pdu->s;
2407 
2408     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
2409     if (err < 0) {
2410         goto out_nofid;
2411     }
2412     trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
2413 
2414     fidp = get_fid(pdu, fid);
2415     if (fidp == NULL) {
2416         err = -EINVAL;
2417         goto out_nofid;
2418     }
2419     if (fidp->fid_type == P9_FID_DIR) {
2420         if (s->proto_version != V9FS_PROTO_2000U) {
2421             warn_report_once(
2422                 "9p: bad client: T_read request on directory only expected "
2423                 "with 9P2000.u protocol version"
2424             );
2425             err = -EOPNOTSUPP;
2426             goto out;
2427         }
2428         if (off == 0) {
2429             v9fs_co_rewinddir(pdu, fidp);
2430         }
2431         count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
2432         if (count < 0) {
2433             err = count;
2434             goto out;
2435         }
2436         err = pdu_marshal(pdu, offset, "d", count);
2437         if (err < 0) {
2438             goto out;
2439         }
2440         err += offset + count;
2441     } else if (fidp->fid_type == P9_FID_FILE) {
2442         QEMUIOVector qiov_full;
2443         QEMUIOVector qiov;
2444         int32_t len;
2445 
2446         v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
2447         qemu_iovec_init(&qiov, qiov_full.niov);
2448         do {
2449             qemu_iovec_reset(&qiov);
2450             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
2451             if (0) {
2452                 print_sg(qiov.iov, qiov.niov);
2453             }
2454             /* Loop in case of EINTR */
2455             do {
2456                 len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
2457                 if (len >= 0) {
2458                     off   += len;
2459                     count += len;
2460                 }
2461             } while (len == -EINTR && !pdu->cancelled);
2462             if (len < 0) {
2463                 /* I/O error: return the error */
2464                 err = len;
2465                 goto out_free_iovec;
2466             }
2467         } while (count < max_count && len > 0);
2468         err = pdu_marshal(pdu, offset, "d", count);
2469         if (err < 0) {
2470             goto out_free_iovec;
2471         }
2472         err += offset + count;
2473 out_free_iovec:
2474         qemu_iovec_destroy(&qiov);
2475         qemu_iovec_destroy(&qiov_full);
2476     } else if (fidp->fid_type == P9_FID_XATTR) {
2477         err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
2478     } else {
2479         err = -EINVAL;
2480     }
2481     trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
2482 out:
2483     put_fid(pdu, fidp);
2484 out_nofid:
2485     pdu_complete(pdu, err);
2486 }
2487 
2488 /**
2489  * v9fs_readdir_response_size() - Returns size required in Rreaddir response
2490  * for the passed dirent @name.
2491  *
2492  * @name: directory entry's name (i.e. file name, directory name)
2493  * Return: required size in bytes
2494  */
2495 size_t v9fs_readdir_response_size(V9fsString *name)
2496 {
2497     /*
2498      * Size of each dirent on the wire: size of qid (13) + size of offset (8) +
2499      * size of type (1) + size of name.size (2) + strlen(name.data)
2500      */
2501     return 24 + v9fs_string_size(name);
2502 }
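/*
 * Note: worked example of the formula above for a dirent named "foo.txt"
 * (7 bytes):
 *
 *   13 (qid) + 8 (offset) + 1 (type) + 2 (name length) + 7 = 31 bytes
 *
 * matching one "Qqbs" record as marshalled by v9fs_do_readdir() below.
 */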
2503 
2504 static void v9fs_free_dirents(struct V9fsDirEnt *e)
2505 {
2506     struct V9fsDirEnt *next = NULL;
2507 
2508     for (; e; e = next) {
2509         next = e->next;
2510         g_free(e->dent);
2511         g_free(e->st);
2512         g_free(e);
2513     }
2514 }
2515 
2516 static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
2517                                         off_t offset, int32_t max_count)
2518 {
2519     size_t size;
2520     V9fsQID qid;
2521     V9fsString name;
2522     int len, err = 0;
2523     int32_t count = 0;
2524     off_t off;
2525     struct dirent *dent;
2526     struct stat *st;
2527     struct V9fsDirEnt *entries = NULL;
2528 
2529     /*
2530      * inode remapping requires the device id, which in turn might be
2531      * different for different directory entries, so if inode remapping is
2532      * enabled we have to make a full stat for each directory entry
2533      */
2534     const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES;
2535 
2536     /*
2537      * Fetch all required directory entries altogether on a background IO
2538      * thread from fs driver. We don't want to do that for each entry
2539      * individually, because hopping between threads (this main IO thread
2540      * and background IO driver thread) would sum up to huge latencies.
2541      */
2542     count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
2543                                  dostat);
2544     if (count < 0) {
2545         err = count;
2546         count = 0;
2547         goto out;
2548     }
2549     count = 0;
2550 
2551     for (struct V9fsDirEnt *e = entries; e; e = e->next) {
2552         dent = e->dent;
2553 
2554         if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
2555             st = e->st;
2556             /* e->st should never be NULL, but just to be sure */
2557             if (!st) {
2558                 err = -1;
2559                 break;
2560             }
2561 
2562             /* remap inode */
2563             err = stat_to_qid(pdu, st, &qid);
2564             if (err < 0) {
2565                 break;
2566             }
2567         } else {
2568             /*
2569              * Fill up just the path field of qid because the client uses
2570              * only that. Filling the entire qid structure would require a
2571              * stat of each dirent found, which is expensive; for that
2572              * reason we don't call stat_to_qid() here. The only drawback
2573              * is that the multi-device export detection of stat_to_qid()
2574              * is not performed and reported as an error to the user here.
2575              * But the user would get that error anyway when accessing
2576              * those files/dirs through other means.
2577              */
2578             size = MIN(sizeof(dent->d_ino), sizeof(qid.path));
2579             memcpy(&qid.path, &dent->d_ino, size);
2580             /* Fill the other fields with dummy values */
2581             qid.type = 0;
2582             qid.version = 0;
2583         }
2584 
2585         off = qemu_dirent_off(dent);
2586         v9fs_string_init(&name);
2587         v9fs_string_sprintf(&name, "%s", dent->d_name);
2588 
2589         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2590         len = pdu_marshal(pdu, 11 + count, "Qqbs",
2591                           &qid, off,
2592                           dent->d_type, &name);
2593 
2594         v9fs_string_free(&name);
2595 
2596         if (len < 0) {
2597             err = len;
2598             break;
2599         }
2600 
2601         count += len;
2602     }
2603 
2604 out:
2605     v9fs_free_dirents(entries);
2606     if (err < 0) {
2607         return err;
2608     }
2609     return count;
2610 }
2611 
2612 static void coroutine_fn v9fs_readdir(void *opaque)
2613 {
2614     int32_t fid;
2615     V9fsFidState *fidp;
2616     ssize_t retval = 0;
2617     size_t offset = 7;
2618     uint64_t initial_offset;
2619     int32_t count;
2620     uint32_t max_count;
2621     V9fsPDU *pdu = opaque;
2622     V9fsState *s = pdu->s;
2623 
2624     retval = pdu_unmarshal(pdu, offset, "dqd", &fid,
2625                            &initial_offset, &max_count);
2626     if (retval < 0) {
2627         goto out_nofid;
2628     }
2629     trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count);
2630 
2631     /* Enough space for an Rreaddir header: size[4] type[1] tag[2] count[4] */
2632     if (max_count > s->msize - 11) {
2633         max_count = s->msize - 11;
2634         warn_report_once(
2635             "9p: bad client: T_readdir with count > msize - 11"
2636         );
2637     }
2638 
2639     fidp = get_fid(pdu, fid);
2640     if (fidp == NULL) {
2641         retval = -EINVAL;
2642         goto out_nofid;
2643     }
2644     if (fidp->fid_type != P9_FID_DIR) {
2645         warn_report_once("9p: bad client: T_readdir on non-directory stream");
2646         retval = -ENOTDIR;
2647         goto out;
2648     }
2649     if (!fidp->fs.dir.stream) {
2650         retval = -EINVAL;
2651         goto out;
2652     }
2653     if (s->proto_version != V9FS_PROTO_2000L) {
2654         warn_report_once(
2655             "9p: bad client: T_readdir request only expected with 9P2000.L "
2656             "protocol version"
2657         );
2658         retval = -EOPNOTSUPP;
2659         goto out;
2660     }
2661     count = v9fs_do_readdir(pdu, fidp, (off_t) initial_offset, max_count);
2662     if (count < 0) {
2663         retval = count;
2664         goto out;
2665     }
2666     retval = pdu_marshal(pdu, offset, "d", count);
2667     if (retval < 0) {
2668         goto out;
2669     }
2670     retval += count + offset;
2671     trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval);
2672 out:
2673     put_fid(pdu, fidp);
2674 out_nofid:
2675     pdu_complete(pdu, retval);
2676 }
2677 
2678 static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2679                             uint64_t off, uint32_t count,
2680                             struct iovec *sg, int cnt)
2681 {
2682     int i, to_copy;
2683     ssize_t err = 0;
2684     uint64_t write_count;
2685     size_t offset = 7;
2686 
2687 
2688     if (fidp->fs.xattr.len < off) {
2689         return -ENOSPC;
2690     }
2691     write_count = fidp->fs.xattr.len - off;
2692     if (write_count > count) {
2693         write_count = count;
2694     }
2695     err = pdu_marshal(pdu, offset, "d", write_count);
2696     if (err < 0) {
2697         return err;
2698     }
2699     err += offset;
2700     fidp->fs.xattr.copied_len += write_count;
2701     /*
2702      * Now copy the content from the sg list
2703      */
2704     for (i = 0; i < cnt; i++) {
2705         if (write_count > sg[i].iov_len) {
2706             to_copy = sg[i].iov_len;
2707         } else {
2708             to_copy = write_count;
2709         }
2710         memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy);
2711         /* advance the write offset into the xattr buffer for the next element */
2712         off += to_copy;
2713         write_count -= to_copy;
2714     }
2715 
2716     return err;
2717 }
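/*
 * Note: this helper only copies the payload into the in-memory xattr buffer
 * attached to the fid (fidp->fs.xattr.value); the actual write to the
 * underlying filesystem is expected to happen later, when the xattr fid is
 * clunked, not here.
 */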
2718 
2719 static void coroutine_fn v9fs_write(void *opaque)
2720 {
2721     ssize_t err;
2722     int32_t fid;
2723     uint64_t off;
2724     uint32_t count;
2725     int32_t len = 0;
2726     int32_t total = 0;
2727     size_t offset = 7;
2728     V9fsFidState *fidp;
2729     V9fsPDU *pdu = opaque;
2730     V9fsState *s = pdu->s;
2731     QEMUIOVector qiov_full;
2732     QEMUIOVector qiov;
2733 
2734     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count);
2735     if (err < 0) {
2736         pdu_complete(pdu, err);
2737         return;
2738     }
2739     offset += err;
2740     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
2741     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
2742 
2743     fidp = get_fid(pdu, fid);
2744     if (fidp == NULL) {
2745         err = -EINVAL;
2746         goto out_nofid;
2747     }
2748     if (fidp->fid_type == P9_FID_FILE) {
2749         if (fidp->fs.fd == -1) {
2750             err = -EINVAL;
2751             goto out;
2752         }
2753     } else if (fidp->fid_type == P9_FID_XATTR) {
2754         /*
2755          * setxattr operation
2756          */
2757         err = v9fs_xattr_write(s, pdu, fidp, off, count,
2758                                qiov_full.iov, qiov_full.niov);
2759         goto out;
2760     } else {
2761         err = -EINVAL;
2762         goto out;
2763     }
2764     qemu_iovec_init(&qiov, qiov_full.niov);
2765     do {
2766         qemu_iovec_reset(&qiov);
2767         qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
2768         if (0) {
2769             print_sg(qiov.iov, qiov.niov);
2770         }
2771         /* Loop in case of EINTR */
2772         do {
2773             len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off);
2774             if (len >= 0) {
2775                 off   += len;
2776                 total += len;
2777             }
2778         } while (len == -EINTR && !pdu->cancelled);
2779         if (len < 0) {
2780             /* I/O error: return the error */
2781             err = len;
2782             goto out_qiov;
2783         }
2784     } while (total < count && len > 0);
2785 
2786     offset = 7;
2787     err = pdu_marshal(pdu, offset, "d", total);
2788     if (err < 0) {
2789         goto out_qiov;
2790     }
2791     err += offset;
2792     trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
2793 out_qiov:
2794     qemu_iovec_destroy(&qiov);
2795 out:
2796     put_fid(pdu, fidp);
2797 out_nofid:
2798     qemu_iovec_destroy(&qiov_full);
2799     pdu_complete(pdu, err);
2800 }
2801 
2802 static void coroutine_fn v9fs_create(void *opaque)
2803 {
2804     int32_t fid;
2805     int err = 0;
2806     size_t offset = 7;
2807     V9fsFidState *fidp;
2808     V9fsQID qid;
2809     int32_t perm;
2810     int8_t mode;
2811     V9fsPath path;
2812     struct stat stbuf;
2813     V9fsString name;
2814     V9fsString extension;
2815     int iounit;
2816     V9fsPDU *pdu = opaque;
2817     V9fsState *s = pdu->s;
2818 
2819     v9fs_path_init(&path);
2820     v9fs_string_init(&name);
2821     v9fs_string_init(&extension);
2822     err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name,
2823                         &perm, &mode, &extension);
2824     if (err < 0) {
2825         goto out_nofid;
2826     }
2827     trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode);
2828 
2829     if (name_is_illegal(name.data)) {
2830         err = -ENOENT;
2831         goto out_nofid;
2832     }
2833 
2834     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2835         err = -EEXIST;
2836         goto out_nofid;
2837     }
2838 
2839     fidp = get_fid(pdu, fid);
2840     if (fidp == NULL) {
2841         err = -EINVAL;
2842         goto out_nofid;
2843     }
2844     if (fidp->fid_type != P9_FID_NONE) {
2845         err = -EINVAL;
2846         goto out;
2847     }
2848     if (perm & P9_STAT_MODE_DIR) {
2849         err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777,
2850                             fidp->uid, -1, &stbuf);
2851         if (err < 0) {
2852             goto out;
2853         }
2854         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2855         if (err < 0) {
2856             goto out;
2857         }
2858         v9fs_path_write_lock(s);
2859         v9fs_path_copy(&fidp->path, &path);
2860         v9fs_path_unlock(s);
2861         err = v9fs_co_opendir(pdu, fidp);
2862         if (err < 0) {
2863             goto out;
2864         }
2865         fidp->fid_type = P9_FID_DIR;
2866     } else if (perm & P9_STAT_MODE_SYMLINK) {
2867         err = v9fs_co_symlink(pdu, fidp, &name,
2868                               extension.data, -1 , &stbuf);
2869         if (err < 0) {
2870             goto out;
2871         }
2872         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2873         if (err < 0) {
2874             goto out;
2875         }
2876         v9fs_path_write_lock(s);
2877         v9fs_path_copy(&fidp->path, &path);
2878         v9fs_path_unlock(s);
2879     } else if (perm & P9_STAT_MODE_LINK) {
2880         int32_t ofid = atoi(extension.data);
2881         V9fsFidState *ofidp = get_fid(pdu, ofid);
2882         if (ofidp == NULL) {
2883             err = -EINVAL;
2884             goto out;
2885         }
2886         err = v9fs_co_link(pdu, ofidp, fidp, &name);
2887         put_fid(pdu, ofidp);
2888         if (err < 0) {
2889             goto out;
2890         }
2891         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2892         if (err < 0) {
2893             fidp->fid_type = P9_FID_NONE;
2894             goto out;
2895         }
2896         v9fs_path_write_lock(s);
2897         v9fs_path_copy(&fidp->path, &path);
2898         v9fs_path_unlock(s);
2899         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2900         if (err < 0) {
2901             fidp->fid_type = P9_FID_NONE;
2902             goto out;
2903         }
2904     } else if (perm & P9_STAT_MODE_DEVICE) {
2905         char ctype;
2906         uint32_t major, minor;
2907         mode_t nmode = 0;
2908 
2909         if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) {
2910             err = -errno;
2911             goto out;
2912         }
2913 
2914         switch (ctype) {
2915         case 'c':
2916             nmode = S_IFCHR;
2917             break;
2918         case 'b':
2919             nmode = S_IFBLK;
2920             break;
2921         default:
2922             err = -EIO;
2923             goto out;
2924         }
2925 
2926         nmode |= perm & 0777;
2927         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2928                             makedev(major, minor), nmode, &stbuf);
2929         if (err < 0) {
2930             goto out;
2931         }
2932         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2933         if (err < 0) {
2934             goto out;
2935         }
2936         v9fs_path_write_lock(s);
2937         v9fs_path_copy(&fidp->path, &path);
2938         v9fs_path_unlock(s);
2939     } else if (perm & P9_STAT_MODE_NAMED_PIPE) {
2940         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2941                             0, S_IFIFO | (perm & 0777), &stbuf);
2942         if (err < 0) {
2943             goto out;
2944         }
2945         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2946         if (err < 0) {
2947             goto out;
2948         }
2949         v9fs_path_write_lock(s);
2950         v9fs_path_copy(&fidp->path, &path);
2951         v9fs_path_unlock(s);
2952     } else if (perm & P9_STAT_MODE_SOCKET) {
2953         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2954                             0, S_IFSOCK | (perm & 0777), &stbuf);
2955         if (err < 0) {
2956             goto out;
2957         }
2958         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2959         if (err < 0) {
2960             goto out;
2961         }
2962         v9fs_path_write_lock(s);
2963         v9fs_path_copy(&fidp->path, &path);
2964         v9fs_path_unlock(s);
2965     } else {
2966         err = v9fs_co_open2(pdu, fidp, &name, -1,
2967                             omode_to_uflags(mode) | O_CREAT, perm, &stbuf);
2968         if (err < 0) {
2969             goto out;
2970         }
2971         fidp->fid_type = P9_FID_FILE;
2972         fidp->open_flags = omode_to_uflags(mode);
2973         if (fidp->open_flags & O_EXCL) {
2974             /*
2975              * We let the host file system do the O_EXCL check.
2976              * We should not reclaim such an fd.
2977              */
2978             fidp->flags |= FID_NON_RECLAIMABLE;
2979         }
2980     }
2981     iounit = get_iounit(pdu, &fidp->path);
2982     err = stat_to_qid(pdu, &stbuf, &qid);
2983     if (err < 0) {
2984         goto out;
2985     }
2986     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2987     if (err < 0) {
2988         goto out;
2989     }
2990     err += offset;
2991     trace_v9fs_create_return(pdu->tag, pdu->id,
2992                              qid.type, qid.version, qid.path, iounit);
2993 out:
2994     put_fid(pdu, fidp);
2995 out_nofid:
2996     pdu_complete(pdu, err);
2997     v9fs_string_free(&name);
2998     v9fs_string_free(&extension);
2999     v9fs_path_free(&path);
3000 }
3001 
3002 static void coroutine_fn v9fs_symlink(void *opaque)
3003 {
3004     V9fsPDU *pdu = opaque;
3005     V9fsString name;
3006     V9fsString symname;
3007     V9fsFidState *dfidp;
3008     V9fsQID qid;
3009     struct stat stbuf;
3010     int32_t dfid;
3011     int err = 0;
3012     gid_t gid;
3013     size_t offset = 7;
3014 
3015     v9fs_string_init(&name);
3016     v9fs_string_init(&symname);
3017     err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid);
3018     if (err < 0) {
3019         goto out_nofid;
3020     }
3021     trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid);
3022 
3023     if (name_is_illegal(name.data)) {
3024         err = -ENOENT;
3025         goto out_nofid;
3026     }
3027 
3028     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3029         err = -EEXIST;
3030         goto out_nofid;
3031     }
3032 
3033     dfidp = get_fid(pdu, dfid);
3034     if (dfidp == NULL) {
3035         err = -EINVAL;
3036         goto out_nofid;
3037     }
3038     err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf);
3039     if (err < 0) {
3040         goto out;
3041     }
3042     err = stat_to_qid(pdu, &stbuf, &qid);
3043     if (err < 0) {
3044         goto out;
3045     }
3046     err = pdu_marshal(pdu, offset, "Q", &qid);
3047     if (err < 0) {
3048         goto out;
3049     }
3050     err += offset;
3051     trace_v9fs_symlink_return(pdu->tag, pdu->id,
3052                               qid.type, qid.version, qid.path);
3053 out:
3054     put_fid(pdu, dfidp);
3055 out_nofid:
3056     pdu_complete(pdu, err);
3057     v9fs_string_free(&name);
3058     v9fs_string_free(&symname);
3059 }
3060 
3061 static void coroutine_fn v9fs_flush(void *opaque)
3062 {
3063     ssize_t err;
3064     int16_t tag;
3065     size_t offset = 7;
3066     V9fsPDU *cancel_pdu = NULL;
3067     V9fsPDU *pdu = opaque;
3068     V9fsState *s = pdu->s;
3069 
3070     err = pdu_unmarshal(pdu, offset, "w", &tag);
3071     if (err < 0) {
3072         pdu_complete(pdu, err);
3073         return;
3074     }
3075     trace_v9fs_flush(pdu->tag, pdu->id, tag);
3076 
3077     if (pdu->tag == tag) {
3078         warn_report("the guest sent a self-referencing 9P flush request");
3079     } else {
3080         QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
3081             if (cancel_pdu->tag == tag) {
3082                 break;
3083             }
3084         }
3085     }
3086     if (cancel_pdu) {
3087         cancel_pdu->cancelled = 1;
3088         /*
3089          * Wait for pdu to complete.
3090          */
3091         qemu_co_queue_wait(&cancel_pdu->complete, NULL);
3092         if (!qemu_co_queue_next(&cancel_pdu->complete)) {
3093             cancel_pdu->cancelled = 0;
3094             pdu_free(cancel_pdu);
3095         }
3096     }
3097     pdu_complete(pdu, 7);
3098 }
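/*
 * Note: an Rflush response carries no payload beyond the common header
 * (size[4] type[1] tag[2]), which is why the request is completed with a
 * fixed length of 7 above.
 */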
3099 
3100 static void coroutine_fn v9fs_link(void *opaque)
3101 {
3102     V9fsPDU *pdu = opaque;
3103     int32_t dfid, oldfid;
3104     V9fsFidState *dfidp, *oldfidp;
3105     V9fsString name;
3106     size_t offset = 7;
3107     int err = 0;
3108 
3109     v9fs_string_init(&name);
3110     err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
3111     if (err < 0) {
3112         goto out_nofid;
3113     }
3114     trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
3115 
3116     if (name_is_illegal(name.data)) {
3117         err = -ENOENT;
3118         goto out_nofid;
3119     }
3120 
3121     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3122         err = -EEXIST;
3123         goto out_nofid;
3124     }
3125 
3126     dfidp = get_fid(pdu, dfid);
3127     if (dfidp == NULL) {
3128         err = -ENOENT;
3129         goto out_nofid;
3130     }
3131 
3132     oldfidp = get_fid(pdu, oldfid);
3133     if (oldfidp == NULL) {
3134         err = -ENOENT;
3135         goto out;
3136     }
3137     err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
3138     if (!err) {
3139         err = offset;
3140     }
3141     put_fid(pdu, oldfidp);
3142 out:
3143     put_fid(pdu, dfidp);
3144 out_nofid:
3145     v9fs_string_free(&name);
3146     pdu_complete(pdu, err);
3147 }
3148 
3149 /* Only works with path name based fid */
3150 static void coroutine_fn v9fs_remove(void *opaque)
3151 {
3152     int32_t fid;
3153     int err = 0;
3154     size_t offset = 7;
3155     V9fsFidState *fidp;
3156     V9fsPDU *pdu = opaque;
3157 
3158     err = pdu_unmarshal(pdu, offset, "d", &fid);
3159     if (err < 0) {
3160         goto out_nofid;
3161     }
3162     trace_v9fs_remove(pdu->tag, pdu->id, fid);
3163 
3164     fidp = get_fid(pdu, fid);
3165     if (fidp == NULL) {
3166         err = -EINVAL;
3167         goto out_nofid;
3168     }
3169     /* if fs driver is not path based, return EOPNOTSUPP */
3170     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3171         err = -EOPNOTSUPP;
3172         goto out_err;
3173     }
3174     /*
3175      * If the file is unlinked, we cannot reopen
3176      * the file later, so don't reclaim its fd.
3177      */
3178     err = v9fs_mark_fids_unreclaim(pdu, &fidp->path);
3179     if (err < 0) {
3180         goto out_err;
3181     }
3182     err = v9fs_co_remove(pdu, &fidp->path);
3183     if (!err) {
3184         err = offset;
3185     }
3186 out_err:
3187     /* For TREMOVE we need to clunk the fid even on failed remove */
3188     clunk_fid(pdu->s, fidp->fid);
3189     put_fid(pdu, fidp);
3190 out_nofid:
3191     pdu_complete(pdu, err);
3192 }
3193 
3194 static void coroutine_fn v9fs_unlinkat(void *opaque)
3195 {
3196     int err = 0;
3197     V9fsString name;
3198     int32_t dfid, flags, rflags = 0;
3199     size_t offset = 7;
3200     V9fsPath path;
3201     V9fsFidState *dfidp;
3202     V9fsPDU *pdu = opaque;
3203 
3204     v9fs_string_init(&name);
3205     err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
3206     if (err < 0) {
3207         goto out_nofid;
3208     }
3209 
3210     if (name_is_illegal(name.data)) {
3211         err = -ENOENT;
3212         goto out_nofid;
3213     }
3214 
3215     if (!strcmp(".", name.data)) {
3216         err = -EINVAL;
3217         goto out_nofid;
3218     }
3219 
3220     if (!strcmp("..", name.data)) {
3221         err = -ENOTEMPTY;
3222         goto out_nofid;
3223     }
3224 
3225     if (flags & ~P9_DOTL_AT_REMOVEDIR) {
3226         err = -EINVAL;
3227         goto out_nofid;
3228     }
3229 
3230     if (flags & P9_DOTL_AT_REMOVEDIR) {
3231         rflags |= AT_REMOVEDIR;
3232     }
3233 
3234     dfidp = get_fid(pdu, dfid);
3235     if (dfidp == NULL) {
3236         err = -EINVAL;
3237         goto out_nofid;
3238     }
3239     /*
3240      * If the file is unlinked, we cannot reopen
3241      * the file later, so don't reclaim its fd.
3242      */
3243     v9fs_path_init(&path);
3244     err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
3245     if (err < 0) {
3246         goto out_err;
3247     }
3248     err = v9fs_mark_fids_unreclaim(pdu, &path);
3249     if (err < 0) {
3250         goto out_err;
3251     }
3252     err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
3253     if (!err) {
3254         err = offset;
3255     }
3256 out_err:
3257     put_fid(pdu, dfidp);
3258     v9fs_path_free(&path);
3259 out_nofid:
3260     pdu_complete(pdu, err);
3261     v9fs_string_free(&name);
3262 }
3263 
3264 
3265 /* Only works with path name based fid */
3266 static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
3267                                              int32_t newdirfid,
3268                                              V9fsString *name)
3269 {
3270     int err = 0;
3271     V9fsPath new_path;
3272     V9fsFidState *tfidp;
3273     V9fsState *s = pdu->s;
3274     V9fsFidState *dirfidp = NULL;
3275     GHashTableIter iter;
3276     gpointer fid;
3277 
3278     v9fs_path_init(&new_path);
3279     if (newdirfid != -1) {
3280         dirfidp = get_fid(pdu, newdirfid);
3281         if (dirfidp == NULL) {
3282             return -ENOENT;
3283         }
3284         if (fidp->fid_type != P9_FID_NONE) {
3285             err = -EINVAL;
3286             goto out;
3287         }
3288         err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
3289         if (err < 0) {
3290             goto out;
3291         }
3292     } else {
3293         char *dir_name = g_path_get_dirname(fidp->path.data);
3294         V9fsPath dir_path;
3295 
3296         v9fs_path_init(&dir_path);
3297         v9fs_path_sprintf(&dir_path, "%s", dir_name);
3298         g_free(dir_name);
3299 
3300         err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path);
3301         v9fs_path_free(&dir_path);
3302         if (err < 0) {
3303             goto out;
3304         }
3305     }
3306     err = v9fs_co_rename(pdu, &fidp->path, &new_path);
3307     if (err < 0) {
3308         goto out;
3309     }
3310 
3311     /*
3312      * Fix up fids pointing to the old name so that they
3313      * start pointing to the new name
3314      */
3315     g_hash_table_iter_init(&iter, s->fids);
3316     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3317         if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) {
3318             /* replace the name */
3319             v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data));
3320         }
3321     }
3322 out:
3323     if (dirfidp) {
3324         put_fid(pdu, dirfidp);
3325     }
3326     v9fs_path_free(&new_path);
3327     return err;
3328 }
3329 
3330 /* Only works with path name based fid */
3331 static void coroutine_fn v9fs_rename(void *opaque)
3332 {
3333     int32_t fid;
3334     ssize_t err = 0;
3335     size_t offset = 7;
3336     V9fsString name;
3337     int32_t newdirfid;
3338     V9fsFidState *fidp;
3339     V9fsPDU *pdu = opaque;
3340     V9fsState *s = pdu->s;
3341 
3342     v9fs_string_init(&name);
3343     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name);
3344     if (err < 0) {
3345         goto out_nofid;
3346     }
3347 
3348     if (name_is_illegal(name.data)) {
3349         err = -ENOENT;
3350         goto out_nofid;
3351     }
3352 
3353     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3354         err = -EISDIR;
3355         goto out_nofid;
3356     }
3357 
3358     fidp = get_fid(pdu, fid);
3359     if (fidp == NULL) {
3360         err = -ENOENT;
3361         goto out_nofid;
3362     }
3363     if (fidp->fid_type != P9_FID_NONE) {
3364         err = -EINVAL;
3365         goto out;
3366     }
3367     /* if fs driver is not path based, return EOPNOTSUPP */
3368     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3369         err = -EOPNOTSUPP;
3370         goto out;
3371     }
3372     v9fs_path_write_lock(s);
3373     err = v9fs_complete_rename(pdu, fidp, newdirfid, &name);
3374     v9fs_path_unlock(s);
3375     if (!err) {
3376         err = offset;
3377     }
3378 out:
3379     put_fid(pdu, fidp);
3380 out_nofid:
3381     pdu_complete(pdu, err);
3382     v9fs_string_free(&name);
3383 }
3384 
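/*
 * After a successful renameat on the host, rewrite the cached paths of the
 * fids: every fid whose path lies underneath olddir/old_name is updated to
 * point at the corresponding location underneath newdir/new_name.
 */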
3385 static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
3386                                            V9fsString *old_name,
3387                                            V9fsPath *newdir,
3388                                            V9fsString *new_name)
3389 {
3390     V9fsFidState *tfidp;
3391     V9fsPath oldpath, newpath;
3392     V9fsState *s = pdu->s;
3393     int err;
3394     GHashTableIter iter;
3395     gpointer fid;
3396 
3397     v9fs_path_init(&oldpath);
3398     v9fs_path_init(&newpath);
3399     err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath);
3400     if (err < 0) {
3401         goto out;
3402     }
3403     err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath);
3404     if (err < 0) {
3405         goto out;
3406     }
3407 
3408     /*
3409      * Fix up fids pointing to the old name so that they
3410      * start pointing to the new name
3411      */
3412     g_hash_table_iter_init(&iter, s->fids);
3413     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3414         if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) {
3415             /* replace the name */
3416             v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data));
3417         }
3418     }
3419 out:
3420     v9fs_path_free(&oldpath);
3421     v9fs_path_free(&newpath);
3422     return err;
3423 }
3424 
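/*
 * Helper for Trenameat: performs the renameat through the fs driver and,
 * for path name based exports only, fixes up the cached fid paths
 * afterwards. A newdirfid of -1 means "same directory as olddirfid".
 */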
3425 static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
3426                                                V9fsString *old_name,
3427                                                int32_t newdirfid,
3428                                                V9fsString *new_name)
3429 {
3430     int err = 0;
3431     V9fsState *s = pdu->s;
3432     V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL;
3433 
3434     olddirfidp = get_fid(pdu, olddirfid);
3435     if (olddirfidp == NULL) {
3436         err = -ENOENT;
3437         goto out;
3438     }
3439     if (newdirfid != -1) {
3440         newdirfidp = get_fid(pdu, newdirfid);
3441         if (newdirfidp == NULL) {
3442             err = -ENOENT;
3443             goto out;
3444         }
3445     } else {
3446         newdirfidp = get_fid(pdu, olddirfid);
3447     }
3448 
3449     err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name,
3450                            &newdirfidp->path, new_name);
3451     if (err < 0) {
3452         goto out;
3453     }
3454     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
3455         /* Only path name based fids need the fixup below */
3456         err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name,
3457                                  &newdirfidp->path, new_name);
3458     }
3459 out:
3460     if (olddirfidp) {
3461         put_fid(pdu, olddirfidp);
3462     }
3463     if (newdirfidp) {
3464         put_fid(pdu, newdirfidp);
3465     }
3466     return err;
3467 }
3468 
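/*
 * Trenameat handler. Request layout (per the "dsds" unmarshal format below):
 *   olddirfid[4] oldname[s] newdirfid[4] newname[s]
 * "." and ".." are rejected as either name, and illegal names fail with
 * -ENOENT before any fid is looked up.
 */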
3469 static void coroutine_fn v9fs_renameat(void *opaque)
3470 {
3471     ssize_t err = 0;
3472     size_t offset = 7;
3473     V9fsPDU *pdu = opaque;
3474     V9fsState *s = pdu->s;
3475     int32_t olddirfid, newdirfid;
3476     V9fsString old_name, new_name;
3477 
3478     v9fs_string_init(&old_name);
3479     v9fs_string_init(&new_name);
3480     err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
3481                         &old_name, &newdirfid, &new_name);
3482     if (err < 0) {
3483         goto out_err;
3484     }
3485 
3486     if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
3487         err = -ENOENT;
3488         goto out_err;
3489     }
3490 
3491     if (!strcmp(".", old_name.data) || !strcmp("..", old_name.data) ||
3492         !strcmp(".", new_name.data) || !strcmp("..", new_name.data)) {
3493         err = -EISDIR;
3494         goto out_err;
3495     }
3496 
3497     v9fs_path_write_lock(s);
3498     err = v9fs_complete_renameat(pdu, olddirfid,
3499                                  &old_name, newdirfid, &new_name);
3500     v9fs_path_unlock(s);
3501     if (!err) {
3502         err = offset;
3503     }
3504 
3505 out_err:
3506     pdu_complete(pdu, err);
3507     v9fs_string_free(&old_name);
3508     v9fs_string_free(&new_name);
3509 }
3510 
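/*
 * Twstat handler (9P2000/9P2000.u style stat update). Request layout (per
 * the "dwS" unmarshal format below):
 *   fid[4] unused[2] stat[n]
 * Each stat field that is not set to its "don't touch" value (-1, or an
 * empty name) is applied in turn: chmod, utimensat, chown, rename within
 * the same directory, truncate. A stat with all fields untouched is
 * treated as a request to flush the file to disk (fsync).
 */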
3511 static void coroutine_fn v9fs_wstat(void *opaque)
3512 {
3513     int32_t fid;
3514     int err = 0;
3515     int16_t unused;
3516     V9fsStat v9stat;
3517     size_t offset = 7;
3518     struct stat stbuf;
3519     V9fsFidState *fidp;
3520     V9fsPDU *pdu = opaque;
3521     V9fsState *s = pdu->s;
3522 
3523     v9fs_stat_init(&v9stat);
3524     err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat);
3525     if (err < 0) {
3526         goto out_nofid;
3527     }
3528     trace_v9fs_wstat(pdu->tag, pdu->id, fid,
3529                      v9stat.mode, v9stat.atime, v9stat.mtime);
3530 
3531     fidp = get_fid(pdu, fid);
3532     if (fidp == NULL) {
3533         err = -EINVAL;
3534         goto out_nofid;
3535     }
3536     /* do we need to sync the file? */
3537     if (donttouch_stat(&v9stat)) {
3538         err = v9fs_co_fsync(pdu, fidp, 0);
3539         goto out;
3540     }
3541     if (v9stat.mode != -1) {
3542         uint32_t v9_mode;
3543         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
3544         if (err < 0) {
3545             goto out;
3546         }
3547         v9_mode = stat_to_v9mode(&stbuf);
3548         if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) !=
3549             (v9_mode & P9_STAT_MODE_TYPE_BITS)) {
3550             /* Attempting to change the type */
3551             err = -EIO;
3552             goto out;
3553         }
3554         err = v9fs_co_chmod(pdu, &fidp->path,
3555                             v9mode_to_mode(v9stat.mode,
3556                                            &v9stat.extension));
3557         if (err < 0) {
3558             goto out;
3559         }
3560     }
3561     if (v9stat.mtime != -1 || v9stat.atime != -1) {
3562         struct timespec times[2];
3563         if (v9stat.atime != -1) {
3564             times[0].tv_sec = v9stat.atime;
3565             times[0].tv_nsec = 0;
3566         } else {
3567             times[0].tv_nsec = UTIME_OMIT;
3568         }
3569         if (v9stat.mtime != -1) {
3570             times[1].tv_sec = v9stat.mtime;
3571             times[1].tv_nsec = 0;
3572         } else {
3573             times[1].tv_nsec = UTIME_OMIT;
3574         }
3575         err = v9fs_co_utimensat(pdu, &fidp->path, times);
3576         if (err < 0) {
3577             goto out;
3578         }
3579     }
3580     if (v9stat.n_gid != -1 || v9stat.n_uid != -1) {
3581         err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid);
3582         if (err < 0) {
3583             goto out;
3584         }
3585     }
3586     if (v9stat.name.size != 0) {
3587         v9fs_path_write_lock(s);
3588         err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name);
3589         v9fs_path_unlock(s);
3590         if (err < 0) {
3591             goto out;
3592         }
3593     }
3594     if (v9stat.length != -1) {
3595         err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length);
3596         if (err < 0) {
3597             goto out;
3598         }
3599     }
3600     err = offset;
3601 out:
3602     put_fid(pdu, fidp);
3603 out_nofid:
3604     v9fs_stat_free(&v9stat);
3605     pdu_complete(pdu, err);
3606 }
3607 
3608 static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
3609 {
3610     uint32_t f_type;
3611     uint32_t f_bsize;
3612     uint64_t f_blocks;
3613     uint64_t f_bfree;
3614     uint64_t f_bavail;
3615     uint64_t f_files;
3616     uint64_t f_ffree;
3617     uint64_t fsid_val;
3618     uint32_t f_namelen;
3619     size_t offset = 7;
3620     int32_t bsize_factor;
3621 
3622     /*
3623      * compute bsize factor based on host file system block size
3624      * and client msize
3625      */
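    /*
     * Example: with a 128 KiB msize and a 4 KiB host block size the factor
     * comes out to roughly 32 (integer division), i.e. the guest is shown
     * blocks about 32 times larger and correspondingly fewer of them.
     */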
3626     bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize;
3627     if (!bsize_factor) {
3628         bsize_factor = 1;
3629     }
3630     f_type  = stbuf->f_type;
3631     f_bsize = stbuf->f_bsize;
3632     f_bsize *= bsize_factor;
3633     /*
3634      * f_bsize is adjusted (multiplied) by the bsize factor, so we need to
3635      * adjust (divide) the number of blocks, free blocks and available
3636      * blocks by the same factor
3637      */
3638     f_blocks = stbuf->f_blocks / bsize_factor;
3639     f_bfree  = stbuf->f_bfree / bsize_factor;
3640     f_bavail = stbuf->f_bavail / bsize_factor;
3641     f_files  = stbuf->f_files;
3642     f_ffree  = stbuf->f_ffree;
3643 #ifdef CONFIG_DARWIN
3644     fsid_val = (unsigned int)stbuf->f_fsid.val[0] |
3645                (unsigned long long)stbuf->f_fsid.val[1] << 32;
3646     f_namelen = NAME_MAX;
3647 #else
3648     fsid_val = (unsigned int) stbuf->f_fsid.__val[0] |
3649                (unsigned long long)stbuf->f_fsid.__val[1] << 32;
3650     f_namelen = stbuf->f_namelen;
3651 #endif
3652 
3653     return pdu_marshal(pdu, offset, "ddqqqqqqd",
3654                        f_type, f_bsize, f_blocks, f_bfree,
3655                        f_bavail, f_files, f_ffree,
3656                        fsid_val, f_namelen);
3657 }
3658 
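/*
 * Tstatfs handler. Request layout (per the "d" unmarshal format below):
 *   fid[4]
 * The reply carries the fields marshalled by v9fs_fill_statfs() above:
 *   type[4] bsize[4] blocks[8] bfree[8] bavail[8] files[8] ffree[8]
 *   fsid[8] namelen[4]
 */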
3659 static void coroutine_fn v9fs_statfs(void *opaque)
3660 {
3661     int32_t fid;
3662     ssize_t retval = 0;
3663     size_t offset = 7;
3664     V9fsFidState *fidp;
3665     struct statfs stbuf;
3666     V9fsPDU *pdu = opaque;
3667     V9fsState *s = pdu->s;
3668 
3669     retval = pdu_unmarshal(pdu, offset, "d", &fid);
3670     if (retval < 0) {
3671         goto out_nofid;
3672     }
3673     fidp = get_fid(pdu, fid);
3674     if (fidp == NULL) {
3675         retval = -ENOENT;
3676         goto out_nofid;
3677     }
3678     retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
3679     if (retval < 0) {
3680         goto out;
3681     }
3682     retval = v9fs_fill_statfs(s, pdu, &stbuf);
3683     if (retval < 0) {
3684         goto out;
3685     }
3686     retval += offset;
3687 out:
3688     put_fid(pdu, fidp);
3689 out_nofid:
3690     pdu_complete(pdu, retval);
3691 }
3692 
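/*
 * Tmknod handler. Request layout (per the "dsdddd" unmarshal format below):
 *   dfid[4] name[s] mode[4] major[4] minor[4] gid[4]
 * On success the reply carries the qid of the newly created special file.
 */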
3693 static void coroutine_fn v9fs_mknod(void *opaque)
3694 {
3695 
3696     int mode;
3697     gid_t gid;
3698     int32_t fid;
3699     V9fsQID qid;
3700     int err = 0;
3701     int major, minor;
3702     size_t offset = 7;
3703     V9fsString name;
3704     struct stat stbuf;
3705     V9fsFidState *fidp;
3706     V9fsPDU *pdu = opaque;
3707 
3708     v9fs_string_init(&name);
3709     err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
3710                         &major, &minor, &gid);
3711     if (err < 0) {
3712         goto out_nofid;
3713     }
3714     trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
3715 
3716     if (name_is_illegal(name.data)) {
3717         err = -ENOENT;
3718         goto out_nofid;
3719     }
3720 
3721     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3722         err = -EEXIST;
3723         goto out_nofid;
3724     }
3725 
3726     fidp = get_fid(pdu, fid);
3727     if (fidp == NULL) {
3728         err = -ENOENT;
3729         goto out_nofid;
3730     }
3731     err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
3732                         makedev(major, minor), mode, &stbuf);
3733     if (err < 0) {
3734         goto out;
3735     }
3736     err = stat_to_qid(pdu, &stbuf, &qid);
3737     if (err < 0) {
3738         goto out;
3739     }
3740     err = pdu_marshal(pdu, offset, "Q", &qid);
3741     if (err < 0) {
3742         goto out;
3743     }
3744     err += offset;
3745     trace_v9fs_mknod_return(pdu->tag, pdu->id,
3746                             qid.type, qid.version, qid.path);
3747 out:
3748     put_fid(pdu, fidp);
3749 out_nofid:
3750     pdu_complete(pdu, err);
3751     v9fs_string_free(&name);
3752 }
3753 
3754 /*
3755  * Implement POSIX byte range locking.
3756  * Server side handling of locking is very simple, because the 9p server in
3757  * QEMU can handle only one client, and most of the lock handling (conflict
3758  * detection, merging, etc.) is done by the client's VFS layer itself, so
3759  * there is no need to do anything in the QEMU 9p server's lock code path.
3760  * Hence, when a TLOCK request comes in, always return success.
3761  */
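/*
 * Request layout (per the "dbdqqds" unmarshal format below):
 *   fid[4] type[1] flags[4] start[8] length[8] proc_id[4] client_id[s]
 * The reply is a single status byte, always P9_LOCK_SUCCESS here.
 */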
3762 static void coroutine_fn v9fs_lock(void *opaque)
3763 {
3764     V9fsFlock flock;
3765     size_t offset = 7;
3766     struct stat stbuf;
3767     V9fsFidState *fidp;
3768     int32_t fid, err = 0;
3769     V9fsPDU *pdu = opaque;
3770 
3771     v9fs_string_init(&flock.client_id);
3772     err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type,
3773                         &flock.flags, &flock.start, &flock.length,
3774                         &flock.proc_id, &flock.client_id);
3775     if (err < 0) {
3776         goto out_nofid;
3777     }
3778     trace_v9fs_lock(pdu->tag, pdu->id, fid,
3779                     flock.type, flock.start, flock.length);
3780 
3781 
3782     /* We only support the block flag for now (and even that is currently ignored) */
3783     if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) {
3784         err = -EINVAL;
3785         goto out_nofid;
3786     }
3787     fidp = get_fid(pdu, fid);
3788     if (fidp == NULL) {
3789         err = -ENOENT;
3790         goto out_nofid;
3791     }
3792     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3793     if (err < 0) {
3794         goto out;
3795     }
3796     err = pdu_marshal(pdu, offset, "b", P9_LOCK_SUCCESS);
3797     if (err < 0) {
3798         goto out;
3799     }
3800     err += offset;
3801     trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS);
3802 out:
3803     put_fid(pdu, fidp);
3804 out_nofid:
3805     pdu_complete(pdu, err);
3806     v9fs_string_free(&flock.client_id);
3807 }
3808 
3809 /*
3810  * When a TGETLOCK request comes in, always return success, because all lock
3811  * handling is done by the client's VFS layer.
3812  */
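/*
 * Request layout (per the "dbqqds" unmarshal format below):
 *   fid[4] type[1] start[8] length[8] proc_id[4] client_id[s]
 * The reply echoes the queried range with the type forced to
 * P9_LOCK_TYPE_UNLCK, i.e. "no conflicting lock".
 */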
3813 static void coroutine_fn v9fs_getlock(void *opaque)
3814 {
3815     size_t offset = 7;
3816     struct stat stbuf;
3817     V9fsFidState *fidp;
3818     V9fsGetlock glock;
3819     int32_t fid, err = 0;
3820     V9fsPDU *pdu = opaque;
3821 
3822     v9fs_string_init(&glock.client_id);
3823     err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type,
3824                         &glock.start, &glock.length, &glock.proc_id,
3825                         &glock.client_id);
3826     if (err < 0) {
3827         goto out_nofid;
3828     }
3829     trace_v9fs_getlock(pdu->tag, pdu->id, fid,
3830                        glock.type, glock.start, glock.length);
3831 
3832     fidp = get_fid(pdu, fid);
3833     if (fidp == NULL) {
3834         err = -ENOENT;
3835         goto out_nofid;
3836     }
3837     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3838     if (err < 0) {
3839         goto out;
3840     }
3841     glock.type = P9_LOCK_TYPE_UNLCK;
3842     err = pdu_marshal(pdu, offset, "bqqds", glock.type,
3843                           glock.start, glock.length, glock.proc_id,
3844                           &glock.client_id);
3845     if (err < 0) {
3846         goto out;
3847     }
3848     err += offset;
3849     trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start,
3850                               glock.length, glock.proc_id);
3851 out:
3852     put_fid(pdu, fidp);
3853 out_nofid:
3854     pdu_complete(pdu, err);
3855     v9fs_string_free(&glock.client_id);
3856 }
3857 
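/*
 * Tmkdir handler. Request layout (per the "dsdd" unmarshal format below):
 *   dfid[4] name[s] mode[4] gid[4]
 * On success the reply carries the qid of the newly created directory.
 */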
3858 static void coroutine_fn v9fs_mkdir(void *opaque)
3859 {
3860     V9fsPDU *pdu = opaque;
3861     size_t offset = 7;
3862     int32_t fid;
3863     struct stat stbuf;
3864     V9fsQID qid;
3865     V9fsString name;
3866     V9fsFidState *fidp;
3867     gid_t gid;
3868     int mode;
3869     int err = 0;
3870 
3871     v9fs_string_init(&name);
3872     err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid);
3873     if (err < 0) {
3874         goto out_nofid;
3875     }
3876     trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid);
3877 
3878     if (name_is_illegal(name.data)) {
3879         err = -ENOENT;
3880         goto out_nofid;
3881     }
3882 
3883     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3884         err = -EEXIST;
3885         goto out_nofid;
3886     }
3887 
3888     fidp = get_fid(pdu, fid);
3889     if (fidp == NULL) {
3890         err = -ENOENT;
3891         goto out_nofid;
3892     }
3893     err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf);
3894     if (err < 0) {
3895         goto out;
3896     }
3897     err = stat_to_qid(pdu, &stbuf, &qid);
3898     if (err < 0) {
3899         goto out;
3900     }
3901     err = pdu_marshal(pdu, offset, "Q", &qid);
3902     if (err < 0) {
3903         goto out;
3904     }
3905     err += offset;
3906     trace_v9fs_mkdir_return(pdu->tag, pdu->id,
3907                             qid.type, qid.version, qid.path, err);
3908 out:
3909     put_fid(pdu, fidp);
3910 out_nofid:
3911     pdu_complete(pdu, err);
3912     v9fs_string_free(&name);
3913 }
3914 
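/*
 * Txattrwalk handler. Request layout (per the "dds" unmarshal format below):
 *   fid[4] newfid[4] name[s]
 * An empty name means "list all xattrs" (llistxattr), a non-empty name
 * selects one attribute (lgetxattr). Either way the value is read into a
 * buffer attached to newfid, which becomes a P9_FID_XATTR fid, and the
 * reply returns the buffer size; the client can then fetch the data with
 * reads on newfid.
 */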
3915 static void coroutine_fn v9fs_xattrwalk(void *opaque)
3916 {
3917     int64_t size;
3918     V9fsString name;
3919     ssize_t err = 0;
3920     size_t offset = 7;
3921     int32_t fid, newfid;
3922     V9fsFidState *file_fidp;
3923     V9fsFidState *xattr_fidp = NULL;
3924     V9fsPDU *pdu = opaque;
3925     V9fsState *s = pdu->s;
3926 
3927     v9fs_string_init(&name);
3928     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name);
3929     if (err < 0) {
3930         goto out_nofid;
3931     }
3932     trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data);
3933 
3934     file_fidp = get_fid(pdu, fid);
3935     if (file_fidp == NULL) {
3936         err = -ENOENT;
3937         goto out_nofid;
3938     }
3939     xattr_fidp = alloc_fid(s, newfid);
3940     if (xattr_fidp == NULL) {
3941         err = -EINVAL;
3942         goto out;
3943     }
3944     v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
3945     if (!v9fs_string_size(&name)) {
3946         /*
3947          * listxattr request. Get the size first
3948          */
3949         size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0);
3950         if (size < 0) {
3951             err = size;
3952             clunk_fid(s, xattr_fidp->fid);
3953             goto out;
3954         }
3955         /*
3956          * Read the xattr value
3957          */
3958         xattr_fidp->fs.xattr.len = size;
3959         xattr_fidp->fid_type = P9_FID_XATTR;
3960         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3961         xattr_fidp->fs.xattr.value = g_malloc0(size);
3962         if (size) {
3963             err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
3964                                      xattr_fidp->fs.xattr.value,
3965                                      xattr_fidp->fs.xattr.len);
3966             if (err < 0) {
3967                 clunk_fid(s, xattr_fidp->fid);
3968                 goto out;
3969             }
3970         }
3971         err = pdu_marshal(pdu, offset, "q", size);
3972         if (err < 0) {
3973             goto out;
3974         }
3975         err += offset;
3976     } else {
3977         /*
3978          * Fid for a specific xattr. We check for xattr
3979          * presence and also collect the xattr size.
3980          */
3981         size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3982                                  &name, NULL, 0);
3983         if (size < 0) {
3984             err = size;
3985             clunk_fid(s, xattr_fidp->fid);
3986             goto out;
3987         }
3988         /*
3989          * Read the xattr value
3990          */
3991         xattr_fidp->fs.xattr.len = size;
3992         xattr_fidp->fid_type = P9_FID_XATTR;
3993         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3994         xattr_fidp->fs.xattr.value = g_malloc0(size);
3995         if (size) {
3996             err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3997                                     &name, xattr_fidp->fs.xattr.value,
3998                                     xattr_fidp->fs.xattr.len);
3999             if (err < 0) {
4000                 clunk_fid(s, xattr_fidp->fid);
4001                 goto out;
4002             }
4003         }
4004         err = pdu_marshal(pdu, offset, "q", size);
4005         if (err < 0) {
4006             goto out;
4007         }
4008         err += offset;
4009     }
4010     trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size);
4011 out:
4012     put_fid(pdu, file_fidp);
4013     if (xattr_fidp) {
4014         put_fid(pdu, xattr_fidp);
4015     }
4016 out_nofid:
4017     pdu_complete(pdu, err);
4018     v9fs_string_free(&name);
4019 }
4020 
4021 #if defined(CONFIG_LINUX)
4022 /* Currently, only Linux has XATTR_SIZE_MAX */
4023 #define P9_XATTR_SIZE_MAX XATTR_SIZE_MAX
4024 #elif defined(CONFIG_DARWIN)
4025 /*
4026  * Darwin doesn't seem to define a maximum xattr size in its user
4027  * space header, so we manually set it to 64k here.
4028  *
4029  * Having no limit at all can lead to QEMU crashing during large g_malloc()
4030  * calls. Because QEMU does not currently support macOS guests, this
4031  * preliminary value works only because it mirrors the limit imposed on
4032  * Linux guests.
4033  */
4034 #define P9_XATTR_SIZE_MAX 65536
4035 #else
4036 #error Missing definition for P9_XATTR_SIZE_MAX for this host system
4037 #endif
4038 
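/*
 * Txattrcreate handler. Request layout (per the "dsqd" unmarshal format
 * below):
 *   fid[4] name[s] attr_size[8] flags[4]
 * flags map onto XATTR_CREATE / XATTR_REPLACE. The fid is converted into a
 * P9_FID_XATTR fid with a value buffer of attr_size bytes; the actual
 * attribute data is expected to arrive via subsequent writes on this fid.
 */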
4039 static void coroutine_fn v9fs_xattrcreate(void *opaque)
4040 {
4041     int flags, rflags = 0;
4042     int32_t fid;
4043     uint64_t size;
4044     ssize_t err = 0;
4045     V9fsString name;
4046     size_t offset = 7;
4047     V9fsFidState *file_fidp;
4048     V9fsFidState *xattr_fidp;
4049     V9fsPDU *pdu = opaque;
4050 
4051     v9fs_string_init(&name);
4052     err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags);
4053     if (err < 0) {
4054         goto out_nofid;
4055     }
4056     trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
4057 
4058     if (flags & ~(P9_XATTR_CREATE | P9_XATTR_REPLACE)) {
4059         err = -EINVAL;
4060         goto out_nofid;
4061     }
4062 
4063     if (flags & P9_XATTR_CREATE) {
4064         rflags |= XATTR_CREATE;
4065     }
4066 
4067     if (flags & P9_XATTR_REPLACE) {
4068         rflags |= XATTR_REPLACE;
4069     }
4070 
4071     if (size > P9_XATTR_SIZE_MAX) {
4072         err = -E2BIG;
4073         goto out_nofid;
4074     }
4075 
4076     file_fidp = get_fid(pdu, fid);
4077     if (file_fidp == NULL) {
4078         err = -EINVAL;
4079         goto out_nofid;
4080     }
4081     if (file_fidp->fid_type != P9_FID_NONE) {
4082         err = -EINVAL;
4083         goto out_put_fid;
4084     }
4085 
4086     /* Make the file fid point to xattr */
4087     xattr_fidp = file_fidp;
4088     xattr_fidp->fid_type = P9_FID_XATTR;
4089     xattr_fidp->fs.xattr.copied_len = 0;
4090     xattr_fidp->fs.xattr.xattrwalk_fid = false;
4091     xattr_fidp->fs.xattr.len = size;
4092     xattr_fidp->fs.xattr.flags = rflags;
4093     v9fs_string_init(&xattr_fidp->fs.xattr.name);
4094     v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
4095     xattr_fidp->fs.xattr.value = g_malloc0(size);
4096     err = offset;
4097 out_put_fid:
4098     put_fid(pdu, file_fidp);
4099 out_nofid:
4100     pdu_complete(pdu, err);
4101     v9fs_string_free(&name);
4102 }
4103 
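/*
 * Treadlink handler. Request layout (per the "d" unmarshal format below):
 *   fid[4]
 * The reply carries the link target as a string.
 */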
4104 static void coroutine_fn v9fs_readlink(void *opaque)
4105 {
4106     V9fsPDU *pdu = opaque;
4107     size_t offset = 7;
4108     V9fsString target;
4109     int32_t fid;
4110     int err = 0;
4111     V9fsFidState *fidp;
4112 
4113     err = pdu_unmarshal(pdu, offset, "d", &fid);
4114     if (err < 0) {
4115         goto out_nofid;
4116     }
4117     trace_v9fs_readlink(pdu->tag, pdu->id, fid);
4118     fidp = get_fid(pdu, fid);
4119     if (fidp == NULL) {
4120         err = -ENOENT;
4121         goto out_nofid;
4122     }
4123 
4124     v9fs_string_init(&target);
4125     err = v9fs_co_readlink(pdu, &fidp->path, &target);
4126     if (err < 0) {
4127         goto out;
4128     }
4129     err = pdu_marshal(pdu, offset, "s", &target);
4130     if (err < 0) {
4131         v9fs_string_free(&target);
4132         goto out;
4133     }
4134     err += offset;
4135     trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data);
4136     v9fs_string_free(&target);
4137 out:
4138     put_fid(pdu, fidp);
4139 out_nofid:
4140     pdu_complete(pdu, err);
4141 }
4142 
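/*
 * Dispatch table indexed by 9P request id. Requests whose slot is missing
 * or NULL are answered with -EOPNOTSUPP by pdu_submit() below.
 */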
4143 static CoroutineEntry *pdu_co_handlers[] = {
4144     [P9_TREADDIR] = v9fs_readdir,
4145     [P9_TSTATFS] = v9fs_statfs,
4146     [P9_TGETATTR] = v9fs_getattr,
4147     [P9_TSETATTR] = v9fs_setattr,
4148     [P9_TXATTRWALK] = v9fs_xattrwalk,
4149     [P9_TXATTRCREATE] = v9fs_xattrcreate,
4150     [P9_TMKNOD] = v9fs_mknod,
4151     [P9_TRENAME] = v9fs_rename,
4152     [P9_TLOCK] = v9fs_lock,
4153     [P9_TGETLOCK] = v9fs_getlock,
4154     [P9_TRENAMEAT] = v9fs_renameat,
4155     [P9_TREADLINK] = v9fs_readlink,
4156     [P9_TUNLINKAT] = v9fs_unlinkat,
4157     [P9_TMKDIR] = v9fs_mkdir,
4158     [P9_TVERSION] = v9fs_version,
4159     [P9_TLOPEN] = v9fs_open,
4160     [P9_TATTACH] = v9fs_attach,
4161     [P9_TSTAT] = v9fs_stat,
4162     [P9_TWALK] = v9fs_walk,
4163     [P9_TCLUNK] = v9fs_clunk,
4164     [P9_TFSYNC] = v9fs_fsync,
4165     [P9_TOPEN] = v9fs_open,
4166     [P9_TREAD] = v9fs_read,
4167 #if 0
4168     [P9_TAUTH] = v9fs_auth,
4169 #endif
4170     [P9_TFLUSH] = v9fs_flush,
4171     [P9_TLINK] = v9fs_link,
4172     [P9_TSYMLINK] = v9fs_symlink,
4173     [P9_TCREATE] = v9fs_create,
4174     [P9_TLCREATE] = v9fs_lcreate,
4175     [P9_TWRITE] = v9fs_write,
4176     [P9_TWSTAT] = v9fs_wstat,
4177     [P9_TREMOVE] = v9fs_remove,
4178 };
4179 
4180 static void coroutine_fn v9fs_op_not_supp(void *opaque)
4181 {
4182     V9fsPDU *pdu = opaque;
4183     pdu_complete(pdu, -EOPNOTSUPP);
4184 }
4185 
4186 static void coroutine_fn v9fs_fs_ro(void *opaque)
4187 {
4188     V9fsPDU *pdu = opaque;
4189     pdu_complete(pdu, -EROFS);
4190 }
4191 
4192 static inline bool is_read_only_op(V9fsPDU *pdu)
4193 {
4194     switch (pdu->id) {
4195     case P9_TREADDIR:
4196     case P9_TSTATFS:
4197     case P9_TGETATTR:
4198     case P9_TXATTRWALK:
4199     case P9_TLOCK:
4200     case P9_TGETLOCK:
4201     case P9_TREADLINK:
4202     case P9_TVERSION:
4203     case P9_TLOPEN:
4204     case P9_TATTACH:
4205     case P9_TSTAT:
4206     case P9_TWALK:
4207     case P9_TCLUNK:
4208     case P9_TFSYNC:
4209     case P9_TOPEN:
4210     case P9_TREAD:
4211     case P9_TAUTH:
4212     case P9_TFLUSH:
4213         return 1;
4214     default:
4215         return 0;
4216     }
4217 }
4218 
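/*
 * Entry point used by the transports: decode the message header, pick the
 * handler from pdu_co_handlers[] (falling back to v9fs_op_not_supp for
 * unknown ids and to v9fs_fs_ro for write requests on read-only exports)
 * and run it in a fresh coroutine.
 */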
4219 void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr)
4220 {
4221     Coroutine *co;
4222     CoroutineEntry *handler;
4223     V9fsState *s = pdu->s;
4224 
4225     pdu->size = le32_to_cpu(hdr->size_le);
4226     pdu->id = hdr->id;
4227     pdu->tag = le16_to_cpu(hdr->tag_le);
4228 
4229     if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
4230         (pdu_co_handlers[pdu->id] == NULL)) {
4231         handler = v9fs_op_not_supp;
4232     } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
4233         handler = v9fs_fs_ro;
4234     } else {
4235         handler = pdu_co_handlers[pdu->id];
4236     }
4237 
4238     qemu_co_queue_init(&pdu->complete);
4239     co = qemu_coroutine_create(handler, pdu);
4240     qemu_coroutine_enter(co);
4241 }
4242 
4243 /* Returns 0 on success, 1 on failure. */
4244 int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
4245                                Error **errp)
4246 {
4247     ERRP_GUARD();
4248     int i, len;
4249     struct stat stat;
4250     FsDriverEntry *fse;
4251     V9fsPath path;
4252     int rc = 1;
4253 
4254     assert(!s->transport);
4255     s->transport = t;
4256 
4257     /* initialize pdu allocator */
4258     QLIST_INIT(&s->free_list);
4259     QLIST_INIT(&s->active_list);
4260     for (i = 0; i < MAX_REQ; i++) {
4261         QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
4262         s->pdus[i].s = s;
4263         s->pdus[i].idx = i;
4264     }
4265 
4266     v9fs_path_init(&path);
4267 
4268     fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
4269 
4270     if (!fse) {
4271         /* We don't have a fsdev identified by fsdev_id */
4272         error_setg(errp, "9pfs device couldn't find fsdev with the "
4273                    "id = %s",
4274                    s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
4275         goto out;
4276     }
4277 
4278     if (!s->fsconf.tag) {
4279         /* we haven't specified a mount_tag */
4280         error_setg(errp, "fsdev with id %s needs mount_tag arguments",
4281                    s->fsconf.fsdev_id);
4282         goto out;
4283     }
4284 
4285     s->ctx.export_flags = fse->export_flags;
4286     s->ctx.fs_root = g_strdup(fse->path);
4287     s->ctx.exops.get_st_gen = NULL;
4288     len = strlen(s->fsconf.tag);
4289     if (len > MAX_TAG_LEN - 1) {
4290         error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
4291                    "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
4292         goto out;
4293     }
4294 
4295     s->tag = g_strdup(s->fsconf.tag);
4296     s->ctx.uid = -1;
4297 
4298     s->ops = fse->ops;
4299 
4300     s->ctx.fmode = fse->fmode;
4301     s->ctx.dmode = fse->dmode;
4302 
4303     s->fids = g_hash_table_new(NULL, NULL);
4304     qemu_co_rwlock_init(&s->rename_lock);
4305 
4306     if (s->ops->init(&s->ctx, errp) < 0) {
4307         error_prepend(errp, "cannot initialize fsdev '%s': ",
4308                       s->fsconf.fsdev_id);
4309         goto out;
4310     }
4311 
4312     /*
4313      * Check details of the export path. We need to use the fs driver
4314      * callback to do that. Since we are in the init path, we don't
4315      * use coroutines here.
4316      */
4317     if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
4318         error_setg(errp,
4319                    "error in converting name to path %s", strerror(errno));
4320         goto out;
4321     }
4322     if (s->ops->lstat(&s->ctx, &path, &stat)) {
4323         error_setg(errp, "share path %s does not exist", fse->path);
4324         goto out;
4325     } else if (!S_ISDIR(stat.st_mode)) {
4326         error_setg(errp, "share path %s is not a directory", fse->path);
4327         goto out;
4328     }
4329 
4330     s->dev_id = stat.st_dev;
4331 
4332     /* init inode remapping: */
4333     /* hash table for variable length inode suffixes */
4334     qpd_table_init(&s->qpd_table);
4335     /* hash table for slow/full inode remapping (most users won't need it) */
4336     qpf_table_init(&s->qpf_table);
4337     /* hash table for quick inode remapping */
4338     qpp_table_init(&s->qpp_table);
4339     s->qp_ndevices = 0;
4340     s->qp_affix_next = 1; /* reserve 0 to detect overflow */
4341     s->qp_fullpath_next = 1;
4342 
4343     s->ctx.fst = &fse->fst;
4344     fsdev_throttle_init(s->ctx.fst);
4345 
4346     s->reclaiming = false;
4347 
4348     rc = 0;
4349 out:
4350     if (rc) {
4351         v9fs_device_unrealize_common(s);
4352     }
4353     v9fs_path_free(&path);
4354     return rc;
4355 }
4356 
4357 void v9fs_device_unrealize_common(V9fsState *s)
4358 {
4359     if (s->ops && s->ops->cleanup) {
4360         s->ops->cleanup(&s->ctx);
4361     }
4362     if (s->ctx.fst) {
4363         fsdev_throttle_cleanup(s->ctx.fst);
4364     }
4365     if (s->fids) {
4366         g_hash_table_destroy(s->fids);
4367         s->fids = NULL;
4368     }
4369     g_free(s->tag);
4370     qp_table_destroy(&s->qpd_table);
4371     qp_table_destroy(&s->qpp_table);
4372     qp_table_destroy(&s->qpf_table);
4373     g_free(s->ctx.fs_root);
4374 }
4375 
4376 typedef struct VirtfsCoResetData {
4377     V9fsPDU pdu;
4378     bool done;
4379 } VirtfsCoResetData;
4380 
4381 static void coroutine_fn virtfs_co_reset(void *opaque)
4382 {
4383     VirtfsCoResetData *data = opaque;
4384 
4385     virtfs_reset(&data->pdu);
4386     data->done = true;
4387 }
4388 
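/*
 * Device reset: wait for all in-flight PDUs to drain, then run
 * virtfs_reset() in a coroutine (so that it can use the coroutine based fs
 * driver helpers) and poll until it has finished.
 */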
4389 void v9fs_reset(V9fsState *s)
4390 {
4391     VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
4392     Coroutine *co;
4393 
4394     while (!QLIST_EMPTY(&s->active_list)) {
4395         aio_poll(qemu_get_aio_context(), true);
4396     }
4397 
4398     co = qemu_coroutine_create(virtfs_co_reset, &data);
4399     qemu_coroutine_enter(co);
4400 
4401     while (!data.done) {
4402         aio_poll(qemu_get_aio_context(), true);
4403     }
4404 }
4405 
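/*
 * Constructor: derive the fd reclaim thresholds from RLIMIT_NOFILE.
 * open_fd_hw is set a safety margin (400 fds, or a third of the limit,
 * whichever is smaller) below the soft limit; open_fd_rc is set to half of
 * the soft limit and is used by the fd reclaim logic.
 */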
4406 static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
4407 {
4408     struct rlimit rlim;
4409     if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
4410         error_report("Failed to get the resource limit");
4411         exit(1);
4412     }
4413     open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3);
4414     open_fd_rc = rlim.rlim_cur / 2;
4415 }
4416