/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}
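
/*
 * Illustrative sketch (not compiled): a minimal, hypothetical example of how
 * a caller coroutine might drive the legacy per-entry v9fs_co_readdir()
 * above. Note that each loop iteration dispatches one readdir() call to a
 * worker thread, which is exactly the per-request latency that
 * v9fs_co_readdir_many() below is meant to avoid. All local variable names
 * here are hypothetical.
 */
#if 0
    struct dirent *dent;
    int err;

    for (;;) {
        err = v9fs_co_readdir(pdu, fidp, &dent);
        if (err < 0) {
            /* fs driver error, or -EINTR if the request was cancelled */
            break;
        }
        if (!dent) {
            /* end of directory reached */
            break;
        }
        /* ... marshal one directory entry from dent ... */
    }
#endif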

/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int coroutine_fn
do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries,
                off_t offset, int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: Here should be a warn_report_once() if the lock failed.
     *
     * With a good 9p client we should not get into concurrency here,
     * because a good client would not use the same fid for concurrent
     * requests. We do the lock here for safety reasons though. However,
     * the client would then suffer performance issues, so it is better to
     * log that issue here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would need to be discarded by
         * the 9p controller (main thread / top half) anyway
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_new0(V9fsDirEnt, 1);
        } else {
            e = e->next = g_new0(V9fsDirEnt, 1);
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for the directory entry if requested by the caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_new0(struct stat, 1);
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = qemu_dirent_off(dent);
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}

/**
 * v9fs_co_readdir_many() - Reads multiple directory entries in one rush.
 *
 * @pdu: the causing 9p (T_readdir) client request
 * @fidp: already opened directory on which the readdir shall be performed
 * @entries: output for directory entries (must not be NULL)
 * @offset: initial position inside the directory the function shall
 *          seek to before retrieving the directory entries
 * @maxsize: maximum result message body size (in bytes)
 * @dostat: whether a stat() should be performed and returned for
 *          each directory entry
 * Return: resulting response message body size (in bytes) on success,
 *         negative error code otherwise
 *
 * Retrieves the requested (maximum amount of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * Acquiring multiple directory entries in one rush from the fs driver,
 * instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency; in practice this could lead to overall latencies of
 * several hundred ms for reading all entries (of just a single directory)
 * if every directory entry was requested individually from the fs driver.
 *
 * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both on success and on error cases of this
 * function, to avoid memory leaks once @entries is no longer needed.
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
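
/*
 * Illustrative sketch (not compiled): a hypothetical example of how a
 * T_readdir handler on the main IO thread (top half) would typically
 * consume v9fs_co_readdir_many(). As the NOTE above demands,
 * v9fs_free_dirents() is called unconditionally, i.e. also if the call
 * failed. The variable names used here are hypothetical.
 */
#if 0
    struct V9fsDirEnt *entries = NULL;
    struct V9fsDirEnt *e;
    int count;

    count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
                                 dostat);
    if (count >= 0) {
        /* count is the response message body size used by the entries */
        for (e = entries; e; e = e->next) {
            /* ... marshal e->dent (and e->st if dostat was true) ... */
        }
    }
    v9fs_free_dirents(entries);
#endif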

off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}