/* SPDX-License-Identifier: GPL-2.0-or-later */
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI	(0xf000000)
#define DLM_HB_NODE_UP_PRI	(0x8000000)

#define DLM_LOCKID_NAME_MAX	32

#define DLM_DOMAIN_NAME_MAX_LEN		255
#define DLM_LOCK_RES_OWNER_UNKNOWN	O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL	5	// flush everything every 5 passes
#define DLM_THREAD_MS			200	// flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT	(1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)

enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,
};

struct dlm_master_list_entry {
	struct hlist_node master_hash_node;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];
	unsigned int mnamelen;
	unsigned int mnamehash;
};

enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,
	DLM_ASTUNLOCK = 2,
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE	0x0001
#define DLM_RECO_STATE_FINALIZE	0x0002

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head node_data;
	u8 new_master;
	u8 dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};

struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8 joining_node;
	u8 migrate_done; /* set to 1 once the node has migrated all its
			  * lock resources */
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dentry *dlm_debugfs_subroot;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* The filesystem specifies this at domain registration.  We
	 * cache it here to know what to tell other nodes. */
	struct dlm_protocol_version fs_locking_proto;
	/* This is the inter-dlm communication version */
	struct dlm_protocol_version dlm_locking_proto;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm,
						  unsigned int i)
{
	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
			(i % DLM_BUCKETS_PER_PAGE);
}

static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
						 unsigned int i)
{
	return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
			(i % DLM_BUCKETS_PER_PAGE);
}
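
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and an
 * 8-byte struct hlist_head): DLM_HASH_PAGES == 32 and
 * DLM_BUCKETS_PER_PAGE == 512, so the helpers above map hash 74565 to
 * page (74565 / 512) % 32 == 17 and bucket 74565 % 512 == 325 within
 * that page.  The % DLM_HASH_PAGES keeps any 32-bit hash in range, so
 * callers pass the raw dlm_lockid_hash() value straight in.
 */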

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;	/* must have already done a dlm_grab on this! */
}
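
/*
 * Illustrative sketch only (dlm_example_queue_work is hypothetical,
 * not part of the dlm API): this is the pattern message handlers
 * follow to defer work to dlm_worker.  The caller must already hold a
 * dlm reference (dlm_grab) and have allocated @item.
 */
static inline void dlm_example_queue_work(struct dlm_ctxt *dlm,
					  struct dlm_work_item *item,
					  dlm_workfunc_t *func, void *data)
{
	dlm_init_work_item(dlm, item, func, data);

	/* add to the per-domain list, then kick the shared work func */
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
}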

static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000
#define DLM_LOCK_RES_RECOVERY_WAITING     0x00004000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* Added during init and removed during release */
	struct list_head tracking;	/* dlm->tracking_list */

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long last_used;

	struct dlm_ctxt *dlm;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8 owner;	// node which owns the lock resource, or unknown
	u16 state;
	char lvb[DLM_LVB_LEN];
	unsigned int inflight_locks;
	unsigned int inflight_assert_workers;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
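
/*
 * Illustrative sketch only (dlm_example_lookup is hypothetical; the
 * real lookups are the __dlm_lookup_lockres*() functions declared
 * below): hash the lockid once, pick a bucket with dlm_lockres_hash()
 * and walk it.  The caller must hold dlm->spinlock and must take a
 * reference before using the result.
 */
static inline struct dlm_lock_resource *
dlm_example_lookup(struct dlm_ctxt *dlm, const char *name, unsigned int len)
{
	unsigned int hash = dlm_lockid_hash(name, len);
	struct hlist_head *bucket = dlm_lockres_hash(dlm, hash);
	struct dlm_lock_resource *res;

	hlist_for_each_entry(res, bucket, hash_node) {
		if (res->lockname.len == len &&
		    memcmp(res->lockname.name, name, len) == 0)
			return res;
	}
	return NULL;
}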

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;	// 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};	// 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};

enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};

static inline int dlm_lvb_is_empty(char *lvb)
{
	int i;

	for (i = 0; i < DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

static inline const char *dlm_list_in_text(enum dlm_lockres_list idx)
{
	if (idx == DLM_GRANTED_LIST)
		return "granted";
	else if (idx == DLM_CONVERTING_LIST)
		return "converting";
	else if (idx == DLM_BLOCKED_LIST)
		return "blocked";
	else
		return "unknown";
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;

	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}
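
/*
 * Illustrative sketch only (dlm_example_count_locks is hypothetical):
 * because granted, converting, and blocked are declared back to back
 * and mapped by dlm_list_idx_to_ptr(), every lock on a resource can be
 * visited with one loop over the three list indices.  The caller must
 * hold res->spinlock.
 */
static inline int dlm_example_count_locks(struct dlm_lock_resource *res)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int idx, count = 0;

	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
		queue = dlm_list_idx_to_ptr(res, idx);
		list_for_each_entry(lock, queue, list)
			count++;
	}
	return count;
}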

struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};


enum {
	DLM_MASTER_REQUEST_MSG		= 500,
	DLM_UNUSED_MSG1			= 501,
	DLM_ASSERT_MASTER_MSG		= 502,
	DLM_CREATE_LOCK_MSG		= 503,
	DLM_CONVERT_LOCK_MSG		= 504,
	DLM_PROXY_AST_MSG		= 505,
	DLM_UNLOCK_LOCK_MSG		= 506,
	DLM_DEREF_LOCKRES_MSG		= 507,
	DLM_MIGRATE_REQUEST_MSG		= 508,
	DLM_MIG_LOCKRES_MSG		= 509,
	DLM_QUERY_JOIN_MSG		= 510,
	DLM_ASSERT_JOINED_MSG		= 511,
	DLM_CANCEL_JOIN_MSG		= 512,
	DLM_EXIT_DOMAIN_MSG		= 513,
	DLM_MASTER_REQUERY_MSG		= 514,
	DLM_LOCK_REQUEST_MSG		= 515,
	DLM_RECO_DATA_DONE_MSG		= 516,
	DLM_BEGIN_RECO_MSG		= 517,
	DLM_FINALIZE_RECO_MSG		= 518,
	DLM_QUERY_REGION		= 519,
	DLM_QUERY_NODEINFO		= 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG	= 521,
	DLM_DEREF_LOCKRES_DONE		= 522,
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};


enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};


struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY 0x01
#define DLM_MRES_MIGRATION 0x02
#define DLM_MRES_ALL_DONE 0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    O2NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        O2NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;		// locks sent in this structure
	u8 flags;
	__be32 total_locks;	// locks to be sent for this migration cookie
	__be64 mig_cookie;	// cookie for this lockres migration
				// or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN  \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS))

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED   (O2NET_MAX_PAYLOAD_BYTES - \
				    DLM_MIG_LOCKRES_MAX_LEN)
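
/*
 * Illustrative compile-time check (an assumption of this sketch, not
 * something the dlm currently carries): the sizing argument above only
 * holds while a fully packed migratable lockres fits in one o2net
 * payload.  A .c file could pin that down with:
 *
 *	BUILD_BUG_ON(DLM_MIG_LOCKRES_MAX_LEN > O2NET_MAX_PAYLOAD_BYTES);
 */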

struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];
};
#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
	__be64 cookie;

	__be32 flags;
	__be16 pad1;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];
};
#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
	__be64 cookie;

	__be32 flags;
	u8 node_idx;
	u8 type;
	u8 blocked_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];
};
#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};

struct dlm_query_join_packet {
	u8 code;	/* Response code.  dlm_minor and fs_minor
			   are only valid if this is JOIN_OK */
	u8 dlm_minor;	/* The minor version of the protocol the
			   dlm is speaking. */
	u8 fs_minor;	/* The minor version of the protocol the
			   filesystem is speaking. */
	u8 reserved;
};

union dlm_query_join_response {
	__be32 intval;
	struct dlm_query_join_packet packet;
};
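
/*
 * Illustrative sketch only: the union above lets a join response be
 * built field by field and then travel as a single 32-bit status,
 * along these lines (the real pack/unpack helpers live in the domain
 * join code):
 *
 *	union dlm_query_join_response resp;
 *
 *	resp.packet.code = JOIN_OK;
 *	resp.packet.dlm_minor = ...;
 *	resp.packet.fs_minor = ...;
 *	send resp.intval as the wire status;
 */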

struct dlm_lock_request
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

struct dlm_reco_data_done
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

struct dlm_query_join_request
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	struct dlm_protocol_version dlm_proto;
	struct dlm_protocol_version fs_proto;
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};

struct dlm_assert_joined
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_query_region {
	u8 qr_node;
	u8 qr_numregions;
	u8 qr_namelen;
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};

struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};

struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};

struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};

struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

enum {
	DLM_DEREF_RESPONSE_DONE = 0,
	DLM_DEREF_RESPONSE_INPROG = 1,
};

struct dlm_deref_lockres_done {
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
	u8 ret;

	cookie >>= 56;
	ret = (u8)(cookie & 0xffULL);
	return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	unsigned long long ret;

	ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
	return ret;
}
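
/*
 * Illustrative sketch only (dlm_example_make_cookie is hypothetical;
 * real cookies are minted by the lock creation path): a lock cookie
 * carries the owning node number in its top byte and a sequence number
 * in the low 56 bits, which is exactly what the two helpers above
 * unpack.
 */
static inline u64 dlm_example_make_cookie(u8 node, u64 seq)
{
	return ((u64)node << 56) | (seq & 0x00ffffffffffffffULL);
}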

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * inlining. */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);
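
/*
 * Illustrative sketch only (dlm_example_lockres_exists is
 * hypothetical): dlm_lookup_lockres() returns with a reference held
 * via dlm_lockres_get(), so every successful lookup must be paired
 * with dlm_lockres_put().
 */
static inline int dlm_example_lockres_exists(struct dlm_ctxt *dlm,
					     const char *name,
					     unsigned int len)
{
	struct dlm_lock_resource *res;

	res = dlm_lookup_lockres(dlm, name, len);
	if (!res)
		return 0;
	dlm_lockres_put(res);	/* drop the lookup reference */
	return 1;
}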

int dlm_is_host_down(int errno);

struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit);

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res);

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);

void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop it in the function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop it in the function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_RECOVERY_WAITING|
					  DLM_LOCK_RES_MIGRATING));
}
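
/*
 * Illustrative sketch only: the usual pattern around the wait helpers
 * above is to take res->spinlock, wait out any in-flight state, then
 * mark the resource busy before dropping the lock:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres(res);
 *	res->state |= DLM_LOCK_RES_IN_PROGRESS;
 *	spin_unlock(&res->spinlock);
 */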

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char *dlm_lock_mode_name(int mode)
{
	switch (mode) {
	case LKM_EXMODE:
		return "EX";
	case LKM_PRMODE:
		return "PR";
	case LKM_NLMODE:
		return "NL";
	}
	return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
	/* NO_LOCK compatible with all */
	if (request == LKM_NLMODE ||
	    existing == LKM_NLMODE)
		return 1;

	/* EX incompatible with all non-NO_LOCK */
	if (request == LKM_EXMODE)
		return 0;

	/* request must be PR, which is compatible with PR */
	if (existing == LKM_PRMODE)
		return 1;

	return 0;
}

static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, head, list) {
		if (tmplock == lock)
			return 1;
	}
	return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
	enum dlm_status ret;

	if (err == -ENOMEM)
		ret = DLM_SYSERR;
	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
		ret = DLM_NOLOCKMGR;
	else if (err == -EINVAL)
		ret = DLM_BADPARAM;
	else if (err == -ENAMETOOLONG)
		ret = DLM_IVBUFLEN;
	else
		ret = DLM_BADARGS;
	return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;

	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}
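
/*
 * Illustrative sketch only (dlm_example_count_domain_nodes is
 * hypothetical): snapshot the domain map under dlm->spinlock with
 * dlm_node_iter_init(), then iterate without the lock held, since the
 * iterator owns a private copy of the bitmap.
 */
static inline int dlm_example_count_domain_nodes(struct dlm_ctxt *dlm)
{
	struct dlm_node_iter iter;
	int node, count = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((node = dlm_node_iter_next(&iter)) >= 0)
		count++;
	return count;
}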

static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}

static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}

#endif /* DLMCOMMON_H */