// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= MAX_CACHED_FIDS) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}
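/*
 * Illustrative only (not part of the original file): the typical caller
 * pattern for the cached-dir API below. The surrounding variables are
 * assumed to exist in a hypothetical caller:
 *
 *	struct cached_fid *cfid;
 *	int rc;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		// use cfid->fid, cfid->dentry, cfid->dirents, ...
 *		close_cached_dir(cfid);
 *	}
 *
 * On error, *ret_cfid is left untouched, so a caller must not inspect
 * cfid unless open_cached_dir() returned 0.
 */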
/*
 * Open and cache a directory handle.
 * If an error occurs, *ret_cfid is left untouched.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	server = ses->server;
	cfids = tcon->cfids;

	if (!server->ops->new_lease_key)
		return -EIO;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * At this point we either already have a lease, in which case we can
	 * just return it, or we are guaranteed to be the only thread
	 * accessing this cfid.
	 */
	if (cfid->has_lease) {
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it early, since the compounded
	 * request below may trigger a reconnect, after which the prefix path
	 * could be different (e.g. after a DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		kfree(utf16_path);
		return rc;
	}

	/*
	 * We do not hold the lock during the open, in case SMB2_open needs to
	 * reconnect. This is safe because no other thread will be able to get
	 * a ref to the cfid until we have finished opening the file and
	 * (possibly) acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.fid = pfid,
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->tcon = tcon;
	cfid->is_open = true;

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
		goto oshr_free;

	smb2_parse_contexts(server, o_rsp,
			    &oparms.fid->epoch,
			    oparms.fid->lease_key, &oplock,
			    NULL, NULL);
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
		goto oshr_free;
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_free;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	if (!npath[0])
		dentry = dget(cifs_sb->root);
	else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto oshr_free;
		}
	}
	cfid->dentry = dentry;
	cfid->time = jiffies;
	cfid->has_lease = true;

oshr_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	spin_lock(&cfids->cfid_list_lock);
	if (rc && !cfid->has_lease) {
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		rc = -ENOENT;
	}
	spin_unlock(&cfids->cfid_list_lock);
	if (!rc && !cfid->has_lease) {
		/*
		 * We are guaranteed to have two references at this point.
		 * One for the caller and one for a potential lease.
		 * Release the lease ref so that the directory will be closed
		 * when the caller closes the cached handle.
		 */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	if (rc) {
		if (cfid->is_open)
			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				   cfid->fid.volatile_fid);
		free_cached_dir(cfid);
		cfid = NULL;
	}

	if (rc == 0) {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}

	return rc;
}
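/*
 * Reference-count sketch for open_cached_dir() above (inferred from the
 * code, offered as a reading aid rather than a normative contract):
 *
 *	init_cached_dir()            refcount = 1  (becomes the lease ref)
 *	find_or_create_cached_dir()  refcount = 2  (caller ref)
 *
 * On success with a read lease, the caller drops its ref via
 * close_cached_dir() while the lease ref is dropped later by a lease
 * break, the laundromat, or invalidate_all_cached_dirs(). If the open
 * succeeds but no read lease was granted, the lease ref is dropped
 * immediately in the cleanup path above, so the handle dies as soon as
 * the caller calls close_cached_dir().
 */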
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
			   cfid->fid.volatile_fid);
		atomic_dec(&cfid->tcon->num_remote_opens);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}


void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}
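/*
 * Note on drop_cached_dir_by_name() above: it deliberately drops two
 * references. The kref_put() inside the locked section releases the
 * lease ref, and close_cached_dir() releases the ref that
 * open_cached_dir() took for this caller. Once both are gone,
 * smb2_close_cached_fid() closes the handle on the wire. Also note the
 * lookup_only=true argument: an entry is only dropped if it already
 * exists; the call never populates the cache.
 */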
/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;

	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		list_for_each_entry(cfid, &cfids->entries, entry) {
			dput(cfid->dentry);
			cfid->dentry = NULL;
		}
	}
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &entry);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		/* To prevent race with smb2_cached_lease_break() */
		kref_get(&cfid->refcount);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
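/*
 * Lifetime note for invalidate_all_cached_dirs() above: the extra
 * kref_get() taken under cfid_list_lock keeps each cfid alive across
 * cancel_work_sync(), so a smb2_cached_lease_break() that is already
 * running cannot free the cfid out from under us. The two kref_put()
 * calls afterwards drop the lease ref (if the work item had not done
 * so already) and then that temporary ref.
 */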
static void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, lease_break);

	spin_lock(&cfid->cfids->cfid_list_lock);
	cfid->has_lease = false;
	spin_unlock(&cfid->cfids->cfid_list_lock);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->time = 0;
			/*
			 * We found a matching lease; remove the cfid from
			 * the list so no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			queue_work(cifsiod_wq,
				   &cfid->lease_break);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}
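/*
 * Lease-break flow, as implemented above:
 *
 *	server lease break
 *	  -> cached_dir_lease_break()          (under cfid_list_lock:
 *	       list_del + queue_work)
 *	       -> smb2_cached_lease_break()    (workqueue context)
 *	            -> kref_put()
 *	                 -> smb2_close_cached_fid()  (SMB2_close on the wire)
 *
 * Deferring to cifsiod_wq is presumably needed because SMB2_close()
 * sleeps waiting for the server response, which is not possible under
 * cfid_list_lock or in the context that delivers the lease break.
 */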
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static int
cifs_cfids_laundromat_thread(void *p)
{
	struct cached_fids *cfids = p;
	struct cached_fid *cfid, *q;
	struct list_head entry;

	while (!kthread_should_stop()) {
		ssleep(1);
		INIT_LIST_HEAD(&entry);
		if (kthread_should_stop())
			return 0;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
			if (time_after(jiffies, cfid->time + HZ * 30)) {
				list_move(&cfid->entry, &entry);
				cfids->num_entries--;
			}
		}
		spin_unlock(&cfids->cfid_list_lock);

		list_for_each_entry_safe(cfid, q, &entry, entry) {
			cfid->on_list = false;
			list_del(&cfid->entry);
			/*
			 * Cancel, and wait for the work to finish in
			 * case we are racing with it.
			 */
			cancel_work_sync(&cfid->lease_break);
			if (cfid->has_lease) {
				/*
				 * The lease has not yet been cancelled from
				 * the server, so we need to drop the
				 * reference.
				 */
				spin_lock(&cfids->cfid_list_lock);
				cfid->has_lease = false;
				spin_unlock(&cfids->cfid_list_lock);
				kref_put(&cfid->refcount, smb2_close_cached_fid);
			}
		}
	}

	return 0;
}


struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);

	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
					cfids, "cifsd-cfid-laundromat");
	if (IS_ERR(cfids->laundromat)) {
		cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
		kfree(cfids);
		module_put(THIS_MODULE);
		return NULL;
	}
	return cfids;
}
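/*
 * Cache-expiry sketch for the laundromat above: the thread wakes once a
 * second and retires any cfid that has been cached for more than 30
 * seconds, i.e.:
 *
 *	if (time_after(jiffies, cfid->time + HZ * 30))
 *		// move to a private list, then drop the lease ref
 *
 * cfid->time is set in open_cached_dir() when the lease is acquired. A
 * cfid whose lease broke is removed from the list in
 * cached_dir_lease_break(), so the laundromat only ever sees entries
 * that still hold a healthy lease.
 */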
/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids->laundromat) {
		kthread_stop(cfids->laundromat);
		cfids->laundromat = NULL;
		module_put(THIS_MODULE);
	}

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}
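/*
 * Module-reference pairing (a reading note, not new behaviour): the
 * module_put() in free_cached_dirs() above releases the __module_get()
 * taken in init_cached_dirs() on behalf of the laundromat thread, so
 * cifs.ko cannot be unloaded while a laundromat thread might still be
 * running.
 */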