1 // SPDX-License-Identifier: GPL-2.0-or-later 2 #include <linux/fdtable.h> 3 #include <linux/anon_inodes.h> 4 #include <linux/uio.h> 5 #include "internal.h" 6 7 static int cachefiles_ondemand_fd_release(struct inode *inode, 8 struct file *file) 9 { 10 struct cachefiles_object *object = file->private_data; 11 struct cachefiles_cache *cache = object->volume->cache; 12 int object_id = object->ondemand_id; 13 struct cachefiles_req *req; 14 XA_STATE(xas, &cache->reqs, 0); 15 16 xa_lock(&cache->reqs); 17 object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; 18 19 /* 20 * Flush all pending READ requests since their completion depends on 21 * anon_fd. 22 */ 23 xas_for_each(&xas, req, ULONG_MAX) { 24 if (req->msg.opcode == CACHEFILES_OP_READ) { 25 req->error = -EIO; 26 complete(&req->done); 27 xas_store(&xas, NULL); 28 } 29 } 30 xa_unlock(&cache->reqs); 31 32 xa_erase(&cache->ondemand_ids, object_id); 33 trace_cachefiles_ondemand_fd_release(object, object_id); 34 cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); 35 cachefiles_put_unbind_pincount(cache); 36 return 0; 37 } 38 39 static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, 40 struct iov_iter *iter) 41 { 42 struct cachefiles_object *object = kiocb->ki_filp->private_data; 43 struct cachefiles_cache *cache = object->volume->cache; 44 struct file *file = object->file; 45 size_t len = iter->count; 46 loff_t pos = kiocb->ki_pos; 47 const struct cred *saved_cred; 48 int ret; 49 50 if (!file) 51 return -ENOBUFS; 52 53 cachefiles_begin_secure(cache, &saved_cred); 54 ret = __cachefiles_prepare_write(object, file, &pos, &len, true); 55 cachefiles_end_secure(cache, saved_cred); 56 if (ret < 0) 57 return ret; 58 59 trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len); 60 ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); 61 if (!ret) 62 ret = len; 63 64 return ret; 65 } 66 67 static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, 68 int whence) 69 { 70 struct cachefiles_object *object = filp->private_data; 71 struct file *file = object->file; 72 73 if (!file) 74 return -ENOBUFS; 75 76 return vfs_llseek(file, pos, whence); 77 } 78 79 static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, 80 unsigned long arg) 81 { 82 struct cachefiles_object *object = filp->private_data; 83 struct cachefiles_cache *cache = object->volume->cache; 84 struct cachefiles_req *req; 85 unsigned long id; 86 87 if (ioctl != CACHEFILES_IOC_READ_COMPLETE) 88 return -EINVAL; 89 90 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) 91 return -EOPNOTSUPP; 92 93 id = arg; 94 req = xa_erase(&cache->reqs, id); 95 if (!req) 96 return -EINVAL; 97 98 trace_cachefiles_ondemand_cread(object, id); 99 complete(&req->done); 100 return 0; 101 } 102 103 static const struct file_operations cachefiles_ondemand_fd_fops = { 104 .owner = THIS_MODULE, 105 .release = cachefiles_ondemand_fd_release, 106 .write_iter = cachefiles_ondemand_fd_write_iter, 107 .llseek = cachefiles_ondemand_fd_llseek, 108 .unlocked_ioctl = cachefiles_ondemand_fd_ioctl, 109 }; 110 111 /* 112 * OPEN request Completion (copen) 113 * - command: "copen <id>,<cache_size>" 114 * <cache_size> indicates the object size if >=0, error code if negative 115 */ 116 int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) 117 { 118 struct cachefiles_req *req; 119 struct fscache_cookie *cookie; 120 char *pid, *psize; 121 unsigned long id; 122 long size; 123 int ret; 124 125 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) 126 return -EOPNOTSUPP; 127 128 if (!*args) { 129 pr_err("Empty id specified\n"); 130 return -EINVAL; 131 } 132 133 pid = args; 134 psize = strchr(args, ','); 135 if (!psize) { 136 pr_err("Cache size is not specified\n"); 137 return -EINVAL; 138 } 139 140 *psize = 0; 141 psize++; 142 143 ret = kstrtoul(pid, 0, &id); 144 if (ret) 145 return ret; 146 147 req = xa_erase(&cache->reqs, id); 148 if (!req) 149 return -EINVAL; 150 151 /* fail OPEN request if copen format is invalid */ 152 ret = kstrtol(psize, 0, &size); 153 if (ret) { 154 req->error = ret; 155 goto out; 156 } 157 158 /* fail OPEN request if daemon reports an error */ 159 if (size < 0) { 160 if (!IS_ERR_VALUE(size)) 161 size = -EINVAL; 162 req->error = size; 163 goto out; 164 } 165 166 cookie = req->object->cookie; 167 cookie->object_size = size; 168 if (size) 169 clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); 170 else 171 set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); 172 trace_cachefiles_ondemand_copen(req->object, id, size); 173 174 out: 175 complete(&req->done); 176 return ret; 177 } 178 179 static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) 180 { 181 struct cachefiles_object *object; 182 struct cachefiles_cache *cache; 183 struct cachefiles_open *load; 184 struct file *file; 185 u32 object_id; 186 int ret, fd; 187 188 object = cachefiles_grab_object(req->object, 189 cachefiles_obj_get_ondemand_fd); 190 cache = object->volume->cache; 191 192 ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL, 193 XA_LIMIT(1, INT_MAX), 194 &cache->ondemand_id_next, GFP_KERNEL); 195 if (ret < 0) 196 goto err; 197 198 fd = get_unused_fd_flags(O_WRONLY); 199 if (fd < 0) { 200 ret = fd; 201 goto err_free_id; 202 } 203 204 file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops, 205 object, O_WRONLY); 206 if (IS_ERR(file)) { 207 ret = PTR_ERR(file); 208 goto err_put_fd; 209 } 210 211 file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; 212 fd_install(fd, file); 213 214 load = (void *)req->msg.data; 215 load->fd = fd; 216 req->msg.object_id = object_id; 217 object->ondemand_id = object_id; 218 219 cachefiles_get_unbind_pincount(cache); 220 trace_cachefiles_ondemand_open(object, &req->msg, load); 221 return 0; 222 223 err_put_fd: 224 put_unused_fd(fd); 225 err_free_id: 226 xa_erase(&cache->ondemand_ids, object_id); 227 err: 228 cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); 229 return ret; 230 } 231 232 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, 233 char __user *_buffer, size_t buflen) 234 { 235 struct cachefiles_req *req; 236 struct cachefiles_msg *msg; 237 unsigned long id = 0; 238 size_t n; 239 int ret = 0; 240 XA_STATE(xas, &cache->reqs, 0); 241 242 /* 243 * Search for a request that has not ever been processed, to prevent 244 * requests from being processed repeatedly. 245 */ 246 xa_lock(&cache->reqs); 247 req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW); 248 if (!req) { 249 xa_unlock(&cache->reqs); 250 return 0; 251 } 252 253 msg = &req->msg; 254 n = msg->len; 255 256 if (n > buflen) { 257 xa_unlock(&cache->reqs); 258 return -EMSGSIZE; 259 } 260 261 xas_clear_mark(&xas, CACHEFILES_REQ_NEW); 262 xa_unlock(&cache->reqs); 263 264 id = xas.xa_index; 265 msg->msg_id = id; 266 267 if (msg->opcode == CACHEFILES_OP_OPEN) { 268 ret = cachefiles_ondemand_get_fd(req); 269 if (ret) 270 goto error; 271 } 272 273 if (copy_to_user(_buffer, msg, n) != 0) { 274 ret = -EFAULT; 275 goto err_put_fd; 276 } 277 278 /* CLOSE request has no reply */ 279 if (msg->opcode == CACHEFILES_OP_CLOSE) { 280 xa_erase(&cache->reqs, id); 281 complete(&req->done); 282 } 283 284 return n; 285 286 err_put_fd: 287 if (msg->opcode == CACHEFILES_OP_OPEN) 288 close_fd(((struct cachefiles_open *)msg->data)->fd); 289 error: 290 xa_erase(&cache->reqs, id); 291 req->error = ret; 292 complete(&req->done); 293 return ret; 294 } 295 296 typedef int (*init_req_fn)(struct cachefiles_req *req, void *private); 297 298 static int cachefiles_ondemand_send_req(struct cachefiles_object *object, 299 enum cachefiles_opcode opcode, 300 size_t data_len, 301 init_req_fn init_req, 302 void *private) 303 { 304 struct cachefiles_cache *cache = object->volume->cache; 305 struct cachefiles_req *req; 306 XA_STATE(xas, &cache->reqs, 0); 307 int ret; 308 309 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) 310 return 0; 311 312 if (test_bit(CACHEFILES_DEAD, &cache->flags)) 313 return -EIO; 314 315 req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); 316 if (!req) 317 return -ENOMEM; 318 319 req->object = object; 320 init_completion(&req->done); 321 req->msg.opcode = opcode; 322 req->msg.len = sizeof(struct cachefiles_msg) + data_len; 323 324 ret = init_req(req, private); 325 if (ret) 326 goto out; 327 328 do { 329 /* 330 * Stop enqueuing the request when daemon is dying. The 331 * following two operations need to be atomic as a whole. 332 * 1) check cache state, and 333 * 2) enqueue request if cache is alive. 334 * Otherwise the request may be enqueued after xarray has been 335 * flushed, leaving the orphan request never being completed. 336 * 337 * CPU 1 CPU 2 338 * ===== ===== 339 * test CACHEFILES_DEAD bit 340 * set CACHEFILES_DEAD bit 341 * flush requests in the xarray 342 * enqueue the request 343 */ 344 xas_lock(&xas); 345 346 if (test_bit(CACHEFILES_DEAD, &cache->flags)) { 347 xas_unlock(&xas); 348 ret = -EIO; 349 goto out; 350 } 351 352 /* coupled with the barrier in cachefiles_flush_reqs() */ 353 smp_mb(); 354 355 if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { 356 WARN_ON_ONCE(object->ondemand_id == 0); 357 xas_unlock(&xas); 358 ret = -EIO; 359 goto out; 360 } 361 362 xas.xa_index = 0; 363 xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); 364 if (xas.xa_node == XAS_RESTART) 365 xas_set_err(&xas, -EBUSY); 366 xas_store(&xas, req); 367 xas_clear_mark(&xas, XA_FREE_MARK); 368 xas_set_mark(&xas, CACHEFILES_REQ_NEW); 369 xas_unlock(&xas); 370 } while (xas_nomem(&xas, GFP_KERNEL)); 371 372 ret = xas_error(&xas); 373 if (ret) 374 goto out; 375 376 wake_up_all(&cache->daemon_pollwq); 377 wait_for_completion(&req->done); 378 ret = req->error; 379 out: 380 kfree(req); 381 return ret; 382 } 383 384 static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req, 385 void *private) 386 { 387 struct cachefiles_object *object = req->object; 388 struct fscache_cookie *cookie = object->cookie; 389 struct fscache_volume *volume = object->volume->vcookie; 390 struct cachefiles_open *load = (void *)req->msg.data; 391 size_t volume_key_size, cookie_key_size; 392 void *volume_key, *cookie_key; 393 394 /* 395 * Volume key is a NUL-terminated string. key[0] stores strlen() of the 396 * string, followed by the content of the string (excluding '\0'). 397 */ 398 volume_key_size = volume->key[0] + 1; 399 volume_key = volume->key + 1; 400 401 /* Cookie key is binary data, which is netfs specific. */ 402 cookie_key_size = cookie->key_len; 403 cookie_key = fscache_get_key(cookie); 404 405 if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) { 406 pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n"); 407 return -EINVAL; 408 } 409 410 load->volume_key_size = volume_key_size; 411 load->cookie_key_size = cookie_key_size; 412 memcpy(load->data, volume_key, volume_key_size); 413 memcpy(load->data + volume_key_size, cookie_key, cookie_key_size); 414 415 return 0; 416 } 417 418 static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, 419 void *private) 420 { 421 struct cachefiles_object *object = req->object; 422 int object_id = object->ondemand_id; 423 424 /* 425 * It's possible that object id is still 0 if the cookie looking up 426 * phase failed before OPEN request has ever been sent. Also avoid 427 * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means 428 * anon_fd has already been closed. 429 */ 430 if (object_id <= 0) 431 return -ENOENT; 432 433 req->msg.object_id = object_id; 434 trace_cachefiles_ondemand_close(object, &req->msg); 435 return 0; 436 } 437 438 struct cachefiles_read_ctx { 439 loff_t off; 440 size_t len; 441 }; 442 443 static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, 444 void *private) 445 { 446 struct cachefiles_object *object = req->object; 447 struct cachefiles_read *load = (void *)req->msg.data; 448 struct cachefiles_read_ctx *read_ctx = private; 449 int object_id = object->ondemand_id; 450 451 /* Stop enqueuing requests when daemon has closed anon_fd. */ 452 if (object_id <= 0) { 453 WARN_ON_ONCE(object_id == 0); 454 pr_info_once("READ: anonymous fd closed prematurely.\n"); 455 return -EIO; 456 } 457 458 req->msg.object_id = object_id; 459 load->off = read_ctx->off; 460 load->len = read_ctx->len; 461 trace_cachefiles_ondemand_read(object, &req->msg, load); 462 return 0; 463 } 464 465 int cachefiles_ondemand_init_object(struct cachefiles_object *object) 466 { 467 struct fscache_cookie *cookie = object->cookie; 468 struct fscache_volume *volume = object->volume->vcookie; 469 size_t volume_key_size, cookie_key_size, data_len; 470 471 /* 472 * CacheFiles will firstly check the cache file under the root cache 473 * directory. If the coherency check failed, it will fallback to 474 * creating a new tmpfile as the cache file. Reuse the previously 475 * allocated object ID if any. 476 */ 477 if (object->ondemand_id > 0) 478 return 0; 479 480 volume_key_size = volume->key[0] + 1; 481 cookie_key_size = cookie->key_len; 482 data_len = sizeof(struct cachefiles_open) + 483 volume_key_size + cookie_key_size; 484 485 return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN, 486 data_len, cachefiles_ondemand_init_open_req, NULL); 487 } 488 489 void cachefiles_ondemand_clean_object(struct cachefiles_object *object) 490 { 491 cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, 492 cachefiles_ondemand_init_close_req, NULL); 493 } 494 495 int cachefiles_ondemand_read(struct cachefiles_object *object, 496 loff_t pos, size_t len) 497 { 498 struct cachefiles_read_ctx read_ctx = {pos, len}; 499 500 return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ, 501 sizeof(struct cachefiles_read), 502 cachefiles_ondemand_init_read_req, &read_ctx); 503 } 504