#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         ci->i_ceph_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */

struct ceph_fs_client;
struct ceph_cap;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
	struct ceph_mds_reply_inode *in;
	struct ceph_dir_layout dir_layout;
	u32 symlink_len;
	char *symlink;
	u32 xattr_len;
	char *xattr_data;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head    *head;

	/* trace */
	struct ceph_mds_reply_info_in diri, targeti;
	struct ceph_mds_reply_dirfrag *dirfrag;
	char                          *dname;
	u32                           dname_len;
	struct ceph_mds_reply_lease   *dlease;

	/* extra */
	union {
		/* for fcntl F_GETLK results */
		struct ceph_filelock *filelock_reply;

		/* for readdir results */
		struct {
			struct ceph_mds_reply_dirfrag *dir_dir;
			int                           dir_nr;
			char                          **dir_dname;
			u32                           *dir_dname_len;
			struct ceph_mds_reply_lease   **dir_dlease;
			struct ceph_mds_reply_info_in *dir_in;
			u8                            dir_complete, dir_end;
		};
	};

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};
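
/*
 * Example: readdir replies fill the anonymous struct in the union above.
 * A rough sketch of walking the parsed entries (the request pointer 'req'
 * is a placeholder; r_reply_info lives in struct ceph_mds_request below).
 * Names point into the reply payload, with each length carried separately
 * in dir_dname_len.
 *
 *	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
 *	int i;
 *
 *	for (i = 0; i < rinfo->dir_nr; i++) {
 *		char *name = rinfo->dir_dname[i];
 *		u32 name_len = rinfo->dir_dname_len[i];
 *		struct ceph_mds_reply_info_in *in = &rinfo->dir_in[i];
 *		struct ceph_mds_reply_lease *lease = rinfo->dir_dlease[i];
 *
 *		... handle one directory entry ...
 *	}
 */
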
/*
 * cap releases are batched and sent to the MDS en masse.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -			\
				sizeof(struct ceph_mds_cap_release)) /	\
			       sizeof(struct ceph_mds_cap_item))


/*
 * state associated with each MDS<->client session
 */
enum {
	CEPH_MDS_SESSION_NEW = 1,
	CEPH_MDS_SESSION_OPENING = 2,
	CEPH_MDS_SESSION_OPEN = 3,
	CEPH_MDS_SESSION_HUNG = 4,
	CEPH_MDS_SESSION_CLOSING = 5,
	CEPH_MDS_SESSION_RESTARTING = 6,
	CEPH_MDS_SESSION_RECONNECTING = 7,
};

struct ceph_mds_session {
	struct ceph_mds_client *s_mdsc;
	int               s_mds;
	int               s_state;
	unsigned long     s_ttl;      /* time until mds kills us */
	u64               s_seq;      /* incoming msg seq # */
	struct mutex      s_mutex;    /* serialize session messages */

	struct ceph_connection s_con;

	struct ceph_authorizer *s_authorizer;
	void             *s_authorizer_buf, *s_authorizer_reply_buf;
	size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;

	/* protected by s_gen_ttl_lock */
	spinlock_t        s_gen_ttl_lock;
	u32               s_cap_gen;  /* inc each time we get mds stale msg */
	unsigned long     s_cap_ttl;  /* when session caps expire */

	/* protected by s_cap_lock */
	spinlock_t        s_cap_lock;
	struct list_head  s_caps;     /* all caps issued by this session */
	int               s_nr_caps, s_trim_caps;
	int               s_num_cap_releases;
	struct list_head  s_cap_releases;      /* waiting cap_release messages */
	struct list_head  s_cap_releases_done; /* ready to send */
	struct ceph_cap  *s_cap_iterator;

	/* protected by mutex */
	struct list_head  s_cap_flushing;      /* inodes w/ flushing caps */
	struct list_head  s_cap_snaps_flushing;
	unsigned long     s_renew_requested;   /* last time we sent a renew req */
	u64               s_renew_seq;

	atomic_t          s_ref;
	struct list_head  s_waiting;  /* waiting requests */
	struct list_head  s_unsafe;   /* unsafe requests */
};
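
/*
 * Sessions are reference-counted via s_ref.  A loose sketch of the usual
 * pattern (the 'mds' rank and the elided body are placeholders; the
 * sessions array, ceph_get_mds_session() and ceph_put_mds_session() appear
 * further down in this header): take a reference while holding mdsc->mutex,
 * drop the mutex, and put the reference when done.
 *
 *	struct ceph_mds_session *session = NULL;
 *
 *	mutex_lock(&mdsc->mutex);
 *	if (mds < mdsc->max_sessions && mdsc->sessions[mds])
 *		session = ceph_get_mds_session(mdsc->sessions[mds]);
 *	mutex_unlock(&mdsc->mutex);
 *	if (session) {
 *		... use the session ...
 *		ceph_put_mds_session(session);
 *	}
 */
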
/*
 * modes of choosing which MDS to send a request to
 */
enum {
	USE_ANY_MDS,
	USE_RANDOM_MDS,
	USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
					     struct ceph_mds_request *req);

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
	u64 r_tid;                   /* transaction id */
	struct rb_node r_node;
	struct ceph_mds_client *r_mdsc;

	int r_op;                    /* mds op code */

	/* operation on what? */
	struct inode *r_inode;              /* arg1 */
	struct dentry *r_dentry;            /* arg1 */
	struct dentry *r_old_dentry;        /* arg2: rename from or link from */
	struct inode *r_old_dentry_dir;     /* arg2: old dentry's parent dir */
	char *r_path1, *r_path2;
	struct ceph_vino r_ino1, r_ino2;

	struct inode *r_locked_dir;         /* dir (if any) i_mutex locked by vfs */
	struct inode *r_target_inode;       /* resulting inode */

	struct mutex r_fill_mutex;

	union ceph_mds_request_args r_args;
	int r_fmode;        /* file mode, if expecting cap */
	uid_t r_uid;
	gid_t r_gid;

	/* for choosing which mds to send this request to */
	int r_direct_mode;
	u32 r_direct_hash;      /* choose dir frag based on this dentry hash */
	bool r_direct_is_hash;  /* true if r_direct_hash is valid */

	/* data payload is used for xattr ops */
	struct page **r_pages;
	int r_num_pages;
	int r_data_len;

	/* what caps shall we drop? */
	int r_inode_drop, r_inode_unless;
	int r_dentry_drop, r_dentry_unless;
	int r_old_dentry_drop, r_old_dentry_unless;
	struct inode *r_old_inode;
	int r_old_inode_drop, r_old_inode_unless;

	struct ceph_msg  *r_request;  /* original request */
	int r_request_release_offset;
	struct ceph_msg  *r_reply;
	struct ceph_mds_reply_info_parsed r_reply_info;
	int r_err;
	bool r_aborted;

	unsigned long r_timeout;  /* optional.  jiffies */
	unsigned long r_started;  /* start time to measure timeout against */
	unsigned long r_request_started; /* start time for mds request only,
					    used to measure lease durations */

	/* link unsafe requests to parent directory, for fsync */
	struct inode     *r_unsafe_dir;
	struct list_head  r_unsafe_dir_item;

	struct ceph_mds_session *r_session;

	int               r_attempts;   /* resend attempts */
	int               r_num_fwd;    /* number of forward attempts */
	int               r_resend_mds; /* mds to resend to next, if any */
	u32               r_sent_on_mseq; /* cap mseq request was sent at */

	struct kref       r_kref;
	struct list_head  r_wait;
	struct completion r_completion;
	struct completion r_safe_completion;
	ceph_mds_request_callback_t r_callback;
	struct list_head  r_unsafe_item;  /* per-session unsafe list item */
	bool              r_got_unsafe, r_got_safe, r_got_result;

	bool              r_did_prepopulate;
	u32               r_readdir_offset;

	struct ceph_cap_reservation r_caps_reservation;
	int r_num_caps;
};
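
/*
 * A rough sketch of the normal request lifecycle, using the helpers
 * declared at the end of this header.  ceph_mdsc_do_request() blocks until
 * the reply (or an error) arrives.  The GETATTR op, the 'inode' pointer and
 * the trimmed error handling are illustrative placeholders, not a fixed
 * recipe.
 *
 *	struct ceph_mds_request *req;
 *	int err;
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_AUTH_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_num_caps = 1;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 */
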
/*
 * mds client state
 */
struct ceph_mds_client {
	struct ceph_fs_client  *fsc;
	struct mutex            mutex;         /* all nested structures */

	struct ceph_mdsmap      *mdsmap;
	struct completion       safe_umount_waiters;
	wait_queue_head_t       session_close_wq;
	struct list_head        waiting_for_map;

	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
	int                     max_sessions;  /* len of s_mds_sessions */
	int                     stopping;      /* true if shutting down */

	/*
	 * snap_rwsem will cover cap linkage into snaprealms, and
	 * realm snap contexts.  (later, we can do per-realm snap
	 * contexts locks..)  the empty list contains realms with no
	 * references (implying they contain no inodes with caps) that
	 * should be destroyed.
	 */
	struct rw_semaphore     snap_rwsem;
	struct rb_root          snap_realms;
	struct list_head        snap_empty;
	spinlock_t              snap_empty_lock;  /* protect snap_empty */

	u64                    last_tid;      /* most recent mds request */
	struct rb_root         request_tree;  /* pending mds requests */
	struct delayed_work    delayed_work;  /* delayed work */
	unsigned long    last_renew_caps;  /* last time we renewed our caps */
	struct list_head cap_delay_list;   /* caps with delayed release */
	spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
	struct list_head snap_flush_list;  /* cap_snaps ready to flush */
	spinlock_t       snap_flush_lock;

	u64               cap_flush_seq;
	struct list_head  cap_dirty;           /* inodes with dirty caps */
	struct list_head  cap_dirty_migrating; /* ...that are migrating... */
	int               num_cap_flushing;    /* # caps we are flushing */
	spinlock_t        cap_dirty_lock;      /* protects above items */
	wait_queue_head_t cap_flushing_wq;

	/*
	 * Cap reservations
	 *
	 * Maintain a global pool of preallocated struct ceph_caps, referenced
	 * by struct ceph_caps_reservations.  This ensures that we preallocate
	 * memory needed to successfully process an MDS response.  (If an MDS
	 * sends us cap information and we fail to process it, we will have
	 * problems due to the client and MDS being out of sync.)
	 *
	 * Reservations are 'owned' by a ceph_cap_reservation context.
	 */
	spinlock_t        caps_list_lock;
	struct list_head  caps_list;         /* unused (reserved or
						unreserved) */
	int               caps_total_count;  /* total caps allocated */
	int               caps_use_count;    /* in use */
	int               caps_reserve_count;/* unused, reserved */
	int               caps_avail_count;  /* unused, unreserved */
	int               caps_min_count;    /* keep at least this many
						(unreserved) */
	spinlock_t        dentry_lru_lock;
	struct list_head  dentry_lru;
	int               num_dentry;
};

extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
	atomic_inc(&s->s_ref);
	return s;
}

extern void ceph_put_mds_session(struct ceph_mds_session *s);

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
			     struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
				    struct inode *inode,
				    struct dentry *dn);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);

extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
				     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
				struct inode *dir,
				struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
	kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
	kref_put(&req->r_kref, ceph_mdsc_release_request);
}

extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session);
extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
				  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
				     struct inode *inode,
				     struct dentry *dentry, char action,
				     u32 seq);

extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
				 struct ceph_msg *msg);

extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
						  struct ceph_mds_session *session);

#endif