/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}
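/*
 * Illustrative sketch (hypothetical caller, not part of this file): driving
 * the per-entry API above means one worker-thread round trip per directory
 * entry, which is the performance issue the TODO above refers to:
 *
 *     struct dirent *dent;
 *     for (;;) {
 *         err = v9fs_co_readdir(pdu, fidp, &dent);
 *         if (err < 0 || !dent) {
 *             break;  // error, or end of directory reached
 *         }
 *         // ... marshal dent->d_name into the response ...
 *     }
 */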
/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                           struct V9fsDirEnt **entries, off_t offset,
                           int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: There should be a warn_report_once() here if taking the lock
     * fails (a possible shape is sketched after this function).
     *
     * With a well-behaved 9p client we should not get into concurrency
     * here, because such a client would not use the same fid for concurrent
     * requests. We take the lock here for safety reasons though. However,
     * a client doing so would then suffer performance issues, so it is
     * better to log that issue here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would need to be discarded by
         * the 9p controller (main thread / top half) anyway
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_new0(V9fsDirEnt, 1);
        } else {
            e = e->next = g_new0(V9fsDirEnt, 1);
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_new0(struct stat, 1);
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = qemu_dirent_off(dent);
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}
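/*
 * A possible shape for the warn_report_once() TODO above. This is a
 * hypothetical sketch only: v9fs_readdir_trylock() does not exist (yet),
 * while warn_report_once() is the regular QEMU error-report helper:
 *
 *     if (!v9fs_readdir_trylock(&fidp->fs.dir)) {    // hypothetical helper
 *         warn_report_once(
 *             "9p: client used the same fid for concurrent readdir");
 *         v9fs_readdir_lock(&fidp->fs.dir);
 *     }
 */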
/**
 * v9fs_co_readdir_many() - Reads multiple directory entries in one rush.
 *
 * @pdu: the causing 9p (T_readdir) client request
 * @fidp: already opened directory where readdir shall be performed on
 * @entries: output for directory entries (must not be NULL)
 * @offset: initial position inside the directory the function shall
 *          seek to before retrieving the directory entries
 * @maxsize: maximum result message body size (in bytes)
 * @dostat: whether a stat() should be performed and returned for
 *          each directory entry
 * Return: resulting response message body size (in bytes) on success,
 *         negative error code otherwise
 *
 * Retrieves the requested (max. amount of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * Acquiring multiple directory entries in one rush from the fs driver,
 * instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency. In practice this could add up to overall latencies
 * of several hundred ms for reading all entries of just a single directory
 * if every directory entry were requested individually from the fs driver.
 *
 * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both on success and on error cases of this
 * function, to avoid memory leaks once @entries is no longer needed.
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
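/*
 * Illustrative usage sketch (not part of this file): a 9p controller
 * coroutine (top half) would typically drive v9fs_co_readdir_many() as
 * below. Variable names like max_count are hypothetical and the
 * marshalling step is elided:
 *
 *     struct V9fsDirEnt *entries = NULL;
 *     int count = v9fs_co_readdir_many(pdu, fidp, &entries, offset,
 *                                      max_count, true);
 *     if (count >= 0) {
 *         for (struct V9fsDirEnt *e = entries; e; e = e->next) {
 *             // marshal e->dent (and e->st, due to dostat=true) here
 *         }
 *     }
 *     v9fs_free_dirents(entries);  // required on success AND on error
 *     // count is the response body size in bytes, or -errno on failure
 */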
off_t coroutine_fn v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}