/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/utsname.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         ci->i_ceph_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */

struct ceph_fs_client;
struct ceph_cap;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
	struct ceph_mds_reply_inode *in;
	struct ceph_dir_layout dir_layout;
	u32 symlink_len;
	char *symlink;
	u32 xattr_len;
	char *xattr_data;
	u64 inline_version;
	u32 inline_len;
	char *inline_data;
	u32 pool_ns_len;
	char *pool_ns_data;
};

struct ceph_mds_reply_dir_entry {
	char *name;
	u32 name_len;
	struct ceph_mds_reply_lease *lease;
	struct ceph_mds_reply_info_in inode;
	loff_t offset;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head *head;

	/* trace */
	struct ceph_mds_reply_info_in diri, targeti;
	struct ceph_mds_reply_dirfrag *dirfrag;
	char *dname;
	u32 dname_len;
	struct ceph_mds_reply_lease *dlease;

	/* extra */
	union {
		/* for fcntl F_GETLK results */
		struct ceph_filelock *filelock_reply;

		/* for readdir results */
		struct {
			struct ceph_mds_reply_dirfrag *dir_dir;
			size_t dir_buf_size;
			int dir_nr;
			bool dir_end;
			bool dir_complete;
			bool hash_order;
			bool offset_hash;
			struct ceph_mds_reply_dir_entry *dir_entries;
		};

		/* for create results */
		struct {
			bool has_create_ino;
			u64 ino;
		};
	};

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};


/*
 * cap releases are batched and sent to the MDS en masse.
 *
 * Account for per-message overhead of mds_cap_release header
 * and __le32 for osd epoch barrier trailing field.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - sizeof(u32) -		\
				sizeof(struct ceph_mds_cap_release)) /	\
			       sizeof(struct ceph_mds_cap_item))
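
/*
 * A rough worked example of the macro above, assuming 4 KiB pages and the
 * usual on-wire layouts: a 4-byte struct ceph_mds_cap_release header and
 * 24-byte struct ceph_mds_cap_item entries (these sizes come from the wire
 * format headers, not from this file, so treat them as assumptions):
 *
 *	(4096 - 4 - 4) / 24 = 170 cap release items per message
 */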

/*
 * state associated with each MDS<->client session
 */
enum {
	CEPH_MDS_SESSION_NEW = 1,
	CEPH_MDS_SESSION_OPENING = 2,
	CEPH_MDS_SESSION_OPEN = 3,
	CEPH_MDS_SESSION_HUNG = 4,
	CEPH_MDS_SESSION_CLOSING = 5,
	CEPH_MDS_SESSION_RESTARTING = 6,
	CEPH_MDS_SESSION_RECONNECTING = 7,
	CEPH_MDS_SESSION_REJECTED = 8,
};

struct ceph_mds_session {
	struct ceph_mds_client *s_mdsc;
	int s_mds;
	int s_state;
	unsigned long s_ttl;	/* time until mds kills us */
	u64 s_seq;		/* incoming msg seq # */
	struct mutex s_mutex;	/* serialize session messages */

	struct ceph_connection s_con;

	struct ceph_auth_handshake s_auth;

	/* protected by s_gen_ttl_lock */
	spinlock_t s_gen_ttl_lock;
	u32 s_cap_gen;		/* inc each time we get mds stale msg */
	unsigned long s_cap_ttl; /* when session caps expire */

	/* protected by s_cap_lock */
	spinlock_t s_cap_lock;
	struct list_head s_caps; /* all caps issued by this session */
	int s_nr_caps, s_trim_caps;
	int s_num_cap_releases;
	int s_cap_reconnect;
	int s_readonly;
	struct list_head s_cap_releases; /* waiting cap_release messages */
	struct ceph_cap *s_cap_iterator;

	/* protected by mutex */
	struct list_head s_cap_flushing;  /* inodes w/ flushing caps */
	unsigned long s_renew_requested;  /* last time we sent a renew req */
	u64 s_renew_seq;

	refcount_t s_ref;
	struct list_head s_waiting;	/* waiting requests */
	struct list_head s_unsafe;	/* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
	USE_ANY_MDS,
	USE_RANDOM_MDS,
	USE_AUTH_MDS,	/* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
					     struct ceph_mds_request *req);
/*
 * wait for request completion callback
 */
typedef int (*ceph_mds_request_wait_callback_t) (struct ceph_mds_client *mdsc,
						 struct ceph_mds_request *req);
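
/*
 * Illustrative only (not part of the API): a completion callback is simply a
 * function matching the typedef above, installed on a request before it is
 * run.  The callback name and its body below are a hypothetical sketch; the
 * fields it touches (r_tid, r_err, r_callback) are declared further down in
 * struct ceph_mds_request.
 *
 *	static void example_request_done(struct ceph_mds_client *mdsc,
 *					 struct ceph_mds_request *req)
 *	{
 *		if (req->r_err)
 *			pr_warn("mds request %llu failed: %d\n",
 *				req->r_tid, req->r_err);
 *	}
 *
 *	req->r_callback = example_request_done;
 */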

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
	u64 r_tid;			/* transaction id */
	struct rb_node r_node;
	struct ceph_mds_client *r_mdsc;

	int r_op;			/* mds op code */

	/* operation on what? */
	struct inode *r_inode;		/* arg1 */
	struct dentry *r_dentry;	/* arg1 */
	struct dentry *r_old_dentry;	/* arg2: rename from or link from */
	struct inode *r_old_dentry_dir;	/* arg2: old dentry's parent dir */
	char *r_path1, *r_path2;
	struct ceph_vino r_ino1, r_ino2;

	struct inode *r_parent;		/* parent dir inode */
	struct inode *r_target_inode;	/* resulting inode */

#define CEPH_MDS_R_DIRECT_IS_HASH	(1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED		(2) /* call was aborted */
#define CEPH_MDS_R_GOT_UNSAFE		(3) /* got an unsafe reply */
#define CEPH_MDS_R_GOT_SAFE		(4) /* got a safe reply */
#define CEPH_MDS_R_GOT_RESULT		(5) /* got a result */
#define CEPH_MDS_R_DID_PREPOPULATE	(6) /* prepopulated readdir */
#define CEPH_MDS_R_PARENT_LOCKED	(7) /* is r_parent->i_rwsem wlocked? */
	unsigned long r_req_flags;

	struct mutex r_fill_mutex;

	union ceph_mds_request_args r_args;
	int r_fmode;	/* file mode, if expecting cap */
	kuid_t r_uid;
	kgid_t r_gid;
	struct timespec r_stamp;

	/* for choosing which mds to send this request to */
	int r_direct_mode;
	u32 r_direct_hash;	/* choose dir frag based on this dentry hash */

	/* data payload is used for xattr ops */
	struct ceph_pagelist *r_pagelist;

	/* what caps shall we drop? */
	int r_inode_drop, r_inode_unless;
	int r_dentry_drop, r_dentry_unless;
	int r_old_dentry_drop, r_old_dentry_unless;
	struct inode *r_old_inode;
	int r_old_inode_drop, r_old_inode_unless;

	struct ceph_msg *r_request;	/* original request */
	int r_request_release_offset;
	struct ceph_msg *r_reply;
	struct ceph_mds_reply_info_parsed r_reply_info;
	struct page *r_locked_page;
	int r_err;

	unsigned long r_timeout;	/* optional.  jiffies, 0 is "wait forever" */
	unsigned long r_started;	/* start time to measure timeout against */
	unsigned long r_request_started; /* start time for mds request only,
					    used to measure lease durations */

	/* link unsafe requests to parent directory, for fsync */
	struct inode *r_unsafe_dir;
	struct list_head r_unsafe_dir_item;

	/* unsafe requests that modify the target inode */
	struct list_head r_unsafe_target_item;

	struct ceph_mds_session *r_session;

	int r_attempts;		/* resend attempts */
	int r_num_fwd;		/* number of forward attempts */
	int r_resend_mds;	/* mds to resend to next, if any */
	u32 r_sent_on_mseq;	/* cap mseq request was sent at */

	struct kref r_kref;
	struct list_head r_wait;
	struct completion r_completion;
	struct completion r_safe_completion;
	ceph_mds_request_callback_t r_callback;
	ceph_mds_request_wait_callback_t r_wait_for_completion;
	struct list_head r_unsafe_item;	/* per-session unsafe list item */

	long long r_dir_release_cnt;
	long long r_dir_ordered_cnt;
	int r_readdir_cache_idx;
	u32 r_readdir_offset;

	struct ceph_cap_reservation r_caps_reservation;
	int r_num_caps;
};
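
/*
 * Illustrative only: the CEPH_MDS_R_* values above are bit numbers for the
 * r_req_flags word, so they are typically manipulated with the atomic bitop
 * helpers rather than as masks.  For example (a sketch, not a quote from the
 * implementation):
 *
 *	set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
 *	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
 *		handle_aborted_request(req);	// hypothetical helper
 */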

struct ceph_pool_perm {
	struct rb_node node;
	int perm;
	s64 pool;
	size_t pool_ns_len;
	char pool_ns[];
};

/*
 * mds client state
 */
struct ceph_mds_client {
	struct ceph_fs_client *fsc;
	struct mutex mutex;		/* all nested structures */

	struct ceph_mdsmap *mdsmap;
	struct completion safe_umount_waiters;
	wait_queue_head_t session_close_wq;
	struct list_head waiting_for_map;
	int mdsmap_err;

	struct ceph_mds_session **sessions; /* NULL for mds if no session */
	atomic_t num_sessions;
	int max_sessions;		/* len of sessions array */
	int stopping;			/* true if shutting down */

	/*
	 * snap_rwsem will cover cap linkage into snaprealms, and
	 * realm snap contexts.  (later, we can do per-realm snap
	 * contexts locks..)  the empty list contains realms with no
	 * references (implying they contain no inodes with caps) that
	 * should be destroyed.
	 */
	u64 last_snap_seq;
	struct rw_semaphore snap_rwsem;
	struct rb_root snap_realms;
	struct list_head snap_empty;
	spinlock_t snap_empty_lock;	/* protect snap_empty */

	u64 last_tid;		/* most recent mds request */
	u64 oldest_tid;		/* oldest incomplete mds request,
				   excluding setfilelock requests */
	struct rb_root request_tree;	/* pending mds requests */
	struct delayed_work delayed_work; /* delayed work */
	unsigned long last_renew_caps;	/* last time we renewed our caps */
	struct list_head cap_delay_list; /* caps with delayed release */
	spinlock_t cap_delay_lock;	/* protects cap_delay_list */
	struct list_head snap_flush_list; /* cap_snaps ready to flush */
	spinlock_t snap_flush_lock;

	u64 last_cap_flush_tid;
	struct list_head cap_flush_list;
	struct list_head cap_dirty;	/* inodes with dirty caps */
	struct list_head cap_dirty_migrating; /* ...that are migrating... */
	int num_cap_flushing;		/* # caps we are flushing */
	spinlock_t cap_dirty_lock;	/* protects above items */
	wait_queue_head_t cap_flushing_wq;

	/*
	 * Cap reservations
	 *
	 * Maintain a global pool of preallocated struct ceph_caps, referenced
	 * by struct ceph_cap_reservation.  This ensures that we preallocate
	 * memory needed to successfully process an MDS response.  (If an MDS
	 * sends us cap information and we fail to process it, we will have
	 * problems due to the client and MDS being out of sync.)
	 *
	 * Reservations are 'owned' by a ceph_cap_reservation context.
	 */
	spinlock_t caps_list_lock;
	struct list_head caps_list;	/* unused (reserved or
					   unreserved) */
	int caps_total_count;		/* total caps allocated */
	int caps_use_count;		/* in use */
	int caps_reserve_count;		/* unused, reserved */
	int caps_avail_count;		/* unused, unreserved */
	int caps_min_count;		/* keep at least this many
					   (unreserved) */
	spinlock_t dentry_lru_lock;
	struct list_head dentry_lru;
	int num_dentry;

	struct rw_semaphore pool_perm_rwsem;
	struct rb_root pool_perm_tree;

	char nodename[__NEW_UTS_LEN + 1];
};

extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
	refcount_inc(&s->s_ref);
	return s;
}

extern const char *ceph_session_state_name(int s);

extern void ceph_put_mds_session(struct ceph_mds_session *s);

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
			     struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
					   struct inode *dir);
extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
				     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
				struct inode *dir,
				struct ceph_mds_request *req);
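
/*
 * Illustrative only: a minimal sketch of the synchronous request lifecycle
 * using the helpers above and ceph_mdsc_put_request() declared just below.
 * The op code, the fields filled in, and the error handling are placeholders;
 * real callers elsewhere in fs/ceph set whatever a given operation needs.
 *
 *	struct ceph_mds_request *req;
 *	int err;
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 *	return err;
 */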

static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
	kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
	kref_put(&req->r_kref, ceph_mdsc_release_request);
}

extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
				  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
				     struct inode *inode,
				     struct dentry *dentry, char action,
				     u32 seq);

extern void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc,
				    struct ceph_msg *msg);
extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
				   struct ceph_msg *msg);

extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
						  struct ceph_mds_session *session);

extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session,
			  int max_caps);
#endif