// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}
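/*
 * Illustrative caller pattern (a sketch only; drop_cached_dir_by_name()
 * below is a real in-file example of the same pairing): a successful
 * open_cached_dir() must be balanced by close_cached_dir() to drop the
 * caller's reference:
 *
 *	struct cached_fid *cfid = NULL;
 *
 *	if (!open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid)) {
 *		... use cfid->fid in compounded requests ...
 *		close_cached_dir(cfid);
 *	}
 */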
/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	server = ses->server;
	cfids = tcon->cfids;

	if (!server->ops->new_lease_key)
		return -EIO;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it has a lease. Otherwise, it is either
	 * a new entry or the laundromat worker removed it from
	 * @cfids->entries. In the latter case, the caller will put the last
	 * reference.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it early as we might reconnect
	 * below when trying to send the compounded request and then end up
	 * with a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.fid = pfid,
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	/*
	 * Set @cfid->has_lease to true before sending out the compounded
	 * request so that its lease reference can be put in
	 * cached_dir_lease_break() in case of a lease break right after the
	 * request is sent or while @cfid is still being cached. Concurrent
	 * processes won't be able to use it yet due to @cfid->time being
	 * zero.
	 */
	cfid->has_lease = true;

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->tcon = tcon;
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CONFIG_CIFS_DEBUG2 */

	rc = -EINVAL;
	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	smb2_parse_contexts(server, o_rsp,
			    &oparms.fid->epoch,
			    oparms.fid->lease_key, &oplock,
			    NULL, NULL);
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release the lease reference so that the
			 * directory will be closed when the caller closes
			 * the cached handle.
			 */
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
			goto out;
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
out:
	if (rc) {
		if (cfid->is_open)
			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				   cfid->fid.volatile_fid);
		free_cached_dir(cfid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);
	return rc;
}
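/*
 * Find an already-cached directory handle whose pinned dentry matches
 * @dentry. On success a reference is taken on the cached fid; the caller
 * must drop it with close_cached_dir().
 */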
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached directory handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
			   cfid->fid.volatile_fid);
		atomic_dec(&cfid->tcon->num_remote_opens);
	}

	free_cached_dir(cfid);
}
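/*
 * Drop the cached directory handle for @name, typically because the
 * directory is being removed or renamed: look the entry up without
 * creating a new one (lookup_only) and release the lease reference so
 * the handle is closed once the last user puts its reference.
 */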
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;

	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		list_for_each_entry(cfid, &cfids->entries, entry) {
			dput(cfid->dentry);
			cfid->dentry = NULL;
		}
	}
}
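/*
 * Note that close_all_cached_dirs() above only drops the dentry
 * references; the remote handles themselves are put through the normal
 * kref path (laundromat expiry, lease break, invalidation or the last
 * close_cached_dir()) before free_cached_dirs() finally tears the
 * structures down.
 */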
/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &entry);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		/* To prevent race with smb2_cached_lease_break() */
		kref_get(&cfid->refcount);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * Our lease was never cancelled from the server so
			 * we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

static void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, lease_break);

	spin_lock(&cfid->cfids->cfid_list_lock);
	cfid->has_lease = false;
	spin_unlock(&cfid->cfids->cfid_list_lock);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}
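/*
 * Handle a lease break for a cached directory handle. Returns true if
 * @lease_key matches a cached handle, in which case the entry is taken
 * off the list and the actual release is deferred to
 * smb2_cached_lease_break() on the workqueue (this is presumably called
 * from the response-processing path, where issuing the SMB2_close
 * directly would not be safe).
 */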
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list so
			 * that no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			queue_work(cifsiod_wq,
				   &cfid->lease_break);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			/* To prevent race with smb2_cached_lease_break() */
			kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/*
		 * Cancel and wait for the work to finish in case we are racing
		 * with it.
		 */
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * Our lease has not yet been cancelled from the
			 * server, so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}
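/*
 * Allocate and initialize the per-tcon cache of directory handles and
 * start the laundromat. Paired with free_cached_dirs() below, which is
 * called when the tcon is torn down.
 */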
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);

	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}