/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "coth.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}
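
/*
 * Illustrative sketch, not part of the build: how a caller in coroutine
 * context would consume v9fs_co_readdir() entry by entry. Every iteration
 * pays one full dispatch to a worker thread and back, which is precisely
 * the per-entry latency that v9fs_co_readdir_many() below avoids. The loop
 * shape is hypothetical; what to do with each entry is up to the caller.
 *
 *     struct dirent *dent;
 *     int err;
 *
 *     for (;;) {
 *         err = v9fs_co_readdir(pdu, fidp, &dent);
 *         if (err < 0 || !dent) {
 *             break;    // error, or end of directory reached
 *         }
 *         // ... process one entry, then pay another worker round trip
 *     }
 */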

/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                           struct V9fsDirEnt **entries, off_t offset,
                           int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: There should be a warn_report_once() here if taking the lock
     * failed.
     *
     * With a good 9p client we should not run into concurrency here,
     * because a good client would not use the same fid for concurrent
     * requests. We take the lock here for safety reasons nonetheless.
     * However, such a client would then suffer performance issues, so it
     * is better to log that issue here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would need to be discarded by
         * the 9p controller (main thread / top half) anyway
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_malloc0(sizeof(V9fsDirEnt));
        } else {
            e = e->next = g_malloc0(sizeof(V9fsDirEnt));
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_malloc0(sizeof(struct stat));
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = dent->d_off;
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}
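
/*
 * For orientation, a sketch rather than an authoritative definition: in a
 * 9p2000.L Rreaddir reply each directory entry is encoded on the wire as
 * qid[13] offset[8] type[1] name[s], where a string s is a 2-byte length
 * followed by the name bytes. Assuming that layout, the per-entry estimate
 * that v9fs_readdir_response_size() feeds into the loop above amounts to
 * something of this shape:
 *
 *     len = 13 + 8 + 1 + 2 + name.size;   // fixed fields + name payload
 *
 * do_readdir_many() sums these estimates and stops collecting entries just
 * before the sum would exceed the client-supplied maxsize.
 */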

/**
 * @brief Reads multiple directory entries in one rush.
 *
 * Retrieves the requested (maximum number of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * @discussion Acquiring multiple directory entries in one rush from the fs
 * driver, instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency. In practice this could add up to overall latencies
 * of several hundred ms for reading all entries of just a single directory
 * if every directory entry were requested individually from the fs driver.
 *
 * @note You must @b ALWAYS call @c v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both in success and in error cases of this
 * function, to avoid memory leaks once @p entries are no longer needed.
 *
 * @param pdu - the causing 9p (T_readdir) client request
 * @param fidp - already opened directory on which readdir shall be performed
 * @param entries - output for directory entries (must not be NULL)
 * @param offset - initial position inside the directory the function shall
 *                 seek to before retrieving the directory entries
 * @param maxsize - maximum result message body size (in bytes)
 * @param dostat - whether a stat() should be performed and returned for
 *                 each directory entry
 * @returns resulting response message body size (in bytes) on success,
 *          negative error code otherwise
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
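
/*
 * Illustrative (hypothetical) usage from a request handler in coroutine
 * context; everything except the two functions named in the contract above
 * is made up. Note the unconditional v9fs_free_dirents() call that the
 * @note above demands:
 *
 *     struct V9fsDirEnt *entries = NULL;
 *     struct V9fsDirEnt *e;
 *     int count;
 *
 *     count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
 *                                  dostat);
 *     if (count >= 0) {
 *         for (e = entries; e; e = e->next) {
 *             // ... marshal e->dent (and e->st if dostat was true) ...
 *         }
 *     }
 *     v9fs_free_dirents(entries);   // always, on success and on error
 */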

off_t coroutine_fn v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}
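
/*
 * Sketch of the intended pairing (hypothetical caller, coroutine context):
 * each successful v9fs_co_opendir() increments total_open_fd and must
 * eventually be balanced by a v9fs_co_closedir(), which decrements it
 * again, so that the reclaim check in v9fs_co_opendir() sees an accurate
 * count:
 *
 *     if (v9fs_co_opendir(pdu, fidp) == 0) {
 *         // ... readdir / seekdir / telldir on fidp->fs ...
 *         v9fs_co_closedir(pdu, &fidp->fs);
 *     }
 */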