/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "coth.h"

/*
 * Each helper below must be called from coroutine context: it dispatches
 * the blocking filesystem call to a worker thread via
 * v9fs_co_run_in_worker() and converts failures to negative errno values.
 */

int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
                                V9fsStatDotl *v9stat)
{
    int err = 0;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    if (s->ctx.exops.get_st_gen) {
        v9fs_path_read_lock(s);
        v9fs_co_run_in_worker(
            {
                err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode,
                                              &v9stat->st_gen);
                if (err < 0) {
                    err = -errno;
                }
            });
        v9fs_path_unlock(s);
    }
    return err;
}

int coroutine_fn v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->lstat(&s->ctx, path, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp,
                               struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fstat(&s->ctx, fidp->fid_type, &fidp->fs, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    /*
     * Some FS drivers (e.g. local:mapped-file) cannot fetch attributes
     * through a file descriptor. Fall back to the path name in that case.
     */
    if (err == -EOPNOTSUPP) {
        err = v9fs_co_lstat(pdu, &fidp->path, stbuf);
        if (err == -ENOENT) {
            /*
             * fstat on an unlinked file. Work with the partial results
             * returned by s->ops->fstat.
             */
            err = 0;
        }
    }
    return err;
}

int coroutine_fn v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open(&s->ctx, &fidp->path, flags, &fidp->fs);
            if (err == -1) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, gid_t gid, int flags, int mode,
                               struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode & 07777;
    cred.fc_uid = fidp->uid;
    cred.fc_gid = gid;
    /*
     * Hold the directory fid lock so that the directory path name
     * doesn't change. Take the write lock to be sure this fid
     * cannot be used by another operation.
     */
    v9fs_path_write_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open2(&s->ctx, &fidp->path,
                                name->data, flags, &cred, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                        s->ops->close(&s->ctx, &fidp->fs);
                    } else {
                        v9fs_path_copy(&fidp->path, &path);
                    }
                } else {
                    s->ops->close(&s->ctx, &fidp->fs);
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->close(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}

int coroutine_fn v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fsync(&s->ctx, fidp->fid_type, &fidp->fs, datasync);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

int coroutine_fn v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
                              V9fsFidState *newdirfid, V9fsString *name)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->link(&s->ctx, &oldfid->path,
                               &newdirfid->path, name->data);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    fsdev_co_throttle_request(s->ctx.fst, true, iov, iovcnt);
    v9fs_co_run_in_worker(
        {
            err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
                                struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    fsdev_co_throttle_request(s->ctx.fst, false, iov, iovcnt);
    v9fs_co_run_in_worker(
        {
            err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}
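
/*
 * Usage sketch only, not part of the upstream file: a hypothetical request
 * handler, v9fs_example_getattr(), shows how a coroutine caller would chain
 * these wrappers, checking each negative-errno return before continuing.
 * The function name and its error policy are illustrative assumptions, not
 * existing QEMU symbols; real handlers live in hw/9pfs/9p.c.
 */
static int coroutine_fn v9fs_example_getattr(V9fsPDU *pdu, V9fsFidState *fidp,
                                             V9fsStatDotl *v9stat)
{
    struct stat stbuf;
    int err;

    /* The blocking lstat() runs in a worker thread; this coroutine yields. */
    err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
    if (err < 0) {
        return err;
    }
    /* st_gen is optional; drivers without exops.get_st_gen leave it alone. */
    err = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, v9stat);
    if (err == -EINTR) {
        /* Request was cancelled (e.g. by Tflush); give up. */
        return err;
    }
    /* Other st_gen failures are treated as non-fatal in this sketch. */
    return 0;
}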