/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmconvert.c
 *
 * underlying calls for lock conversion
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

/* NOTE: __dlmconvert_master is the only function in here that
 * needs a spinlock held on entry (res->spinlock) and it is the
 * only one that holds a lock on exit (res->spinlock).
 * All other functions in here need no locks and drop all of
 * the locks that they acquire. */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread);
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type);

/*
 * this is only called directly by dlmlock(), and only when the
 * local node is the owner of the lockres
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: see __dlmconvert_master
 */
enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status;

	spin_lock(&res->spinlock);
	/* we are not in a network handler, this is fine */
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	status = __dlmconvert_master(dlm, res, lock, flags, type,
				     &call_ast, &kick_thread);

	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
		dlm_error(status);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

/* performs lock conversion at the lockres master site
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         takes and drops lock->spinlock
 *   held on exit:  res->spinlock
 * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
 *   call_ast: whether ast should be called for this lock
 *   kick_thread: whether dlm_kick_thread should be called
 */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread)
{
	enum dlm_status status = DLM_NORMAL;
	struct list_head *iter;
	struct dlm_lock *tmplock = NULL;

	assert_spin_locked(&res->spinlock);

	mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
		   lock->ml.type, lock->ml.convert_type, type);

	spin_lock(&lock->spinlock);

	/* already converting? */
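	/* ml.convert_type stays LKM_IVMODE while no conversion is pending;
	 * anything else here means an earlier convert on this lock has not
	 * completed yet, so refuse rather than stack conversions. */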
	if (lock->ml.convert_type != LKM_IVMODE) {
		mlog(ML_ERROR, "attempted to convert a lock with a lock "
		     "conversion pending\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	/* must be on grant queue to convert */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		mlog(ML_ERROR, "attempted to convert a lock not on grant "
		     "queue\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	if (flags & LKM_VALBLK) {
		switch (lock->ml.type) {
			case LKM_EXMODE:
				/* EX + LKM_VALBLK + convert == set lvb */
				mlog(0, "will set lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				lock->lksb->flags |= DLM_LKSB_PUT_LVB;
				break;
			case LKM_PRMODE:
			case LKM_NLMODE:
				/* refetch if new level is not NL */
				if (type > LKM_NLMODE) {
					mlog(0, "will fetch new value into "
					     "lvb: converting %s->%s\n",
					     dlm_lock_mode_name(lock->ml.type),
					     dlm_lock_mode_name(type));
					lock->lksb->flags |= DLM_LKSB_GET_LVB;
				} else {
					mlog(0, "will NOT fetch new value "
					     "into lvb: converting %s->%s\n",
					     dlm_lock_mode_name(lock->ml.type),
					     dlm_lock_mode_name(type));
					flags &= ~(LKM_VALBLK);
				}
				break;
		}
	}


	/* in-place downconvert? */
	if (type <= lock->ml.type)
		goto grant;

	/* upconvert from here on */
	status = DLM_NORMAL;
	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (tmplock == lock)
			continue;
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
		/* existing conversion requests take precedence */
		if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
			goto switch_queues;
	}

	/* fall thru to grant */

grant:
	mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
	     res->lockname.name, dlm_lock_mode_name(type));
	/* immediately grant the new lock type */
	lock->lksb->status = DLM_NORMAL;
	if (lock->ml.node == dlm->node_num)
		mlog(0, "doing in-place convert for nonlocal lock\n");
	lock->ml.type = type;
	if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
		memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);

	status = DLM_NORMAL;
	*call_ast = 1;
	goto unlock_exit;

switch_queues:
	if (flags & LKM_NOQUEUE) {
		mlog(0, "failed to convert NOQUEUE lock %.*s from "
		     "%d to %d...\n", res->lockname.len, res->lockname.name,
		     lock->ml.type, type);
		status = DLM_NOTQUEUED;
		goto unlock_exit;
	}
	mlog(0, "res %.*s, queueing...\n", res->lockname.len,
	     res->lockname.name);

	lock->ml.convert_type = type;
	/* do not alter lock refcount. switching lists. */
	list_move_tail(&lock->list, &res->converting);

unlock_exit:
	spin_unlock(&lock->spinlock);
	if (status == DLM_DENIED) {
		__dlm_print_one_lock_resource(res);
	}
	if (status == DLM_NORMAL)
		*kick_thread = 1;
	return status;
}

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock)
{
	/* do not alter lock refcount. switching lists. */
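	/* put the lock back on the granted queue and clear the
	 * pending-conversion state that was set up before the remote
	 * convert request was sent (see dlmconvert_remote()) */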
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
	lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}

/* messages the master site to do lock conversion
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
 */
enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	enum dlm_status status;

	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "bailing out early since res is RECOVERING "
		     "on secondary queue\n");
		/* __dlm_print_one_lock_resource(res); */
		status = DLM_RECOVERING;
		goto bail;
	}
	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);

	if (lock->ml.convert_type != LKM_IVMODE) {
		__dlm_print_one_lock_resource(res);
		mlog(ML_ERROR, "converting a remote lock that is already "
		     "converting! (cookie=%u:%llu, conv=%d)\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.convert_type);
		status = DLM_DENIED;
		goto bail;
	}
	res->state |= DLM_LOCK_RES_IN_PROGRESS;
	/* move lock to local convert queue */
	/* do not alter lock refcount. switching lists. */
	list_move_tail(&lock->list, &res->converting);
	lock->convert_pending = 1;
	lock->ml.convert_type = type;

	if (flags & LKM_VALBLK) {
		if (lock->ml.type == LKM_EXMODE) {
			flags |= LKM_PUT_LVB;
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
		} else {
			if (lock->ml.convert_type == LKM_NLMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}
	}
	spin_unlock(&res->spinlock);

	/* no locks held here.
	 * need to wait for a reply as to whether it got queued or not. */
	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->convert_pending = 0;
	/* if it failed, move it back to granted queue */
	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		dlm_revert_pending_convert(res, lock);
	}
bail:
	spin_unlock(&res->spinlock);

	/* TODO: should this be a wake_one? */
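	/* both the early RECOVERING bail and the normal completion path end
	 * up here, so waiters blocked on DLM_LOCK_RES_IN_PROGRESS (see
	 * __dlm_wait_on_lockres()) get a chance to recheck res->state */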
	/* wake up any IN_PROGRESS waiters */
	wake_up(&res->wq);

	return status;
}

/* sends DLM_CONVERT_LOCK_MSG to master site
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, status from remote node
 */
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type)
{
	struct dlm_convert_lock convert;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	memset(&convert, 0, sizeof(struct dlm_convert_lock));
	convert.node_idx = dlm->node_num;
	convert.requested_type = type;
	convert.cookie = lock->ml.cookie;
	convert.namelen = res->lockname.len;
	convert.flags = cpu_to_be32(flags);
	memcpy(convert.name, res->lockname.name, convert.namelen);

	vec[0].iov_len = sizeof(struct dlm_convert_lock);
	vec[0].iov_base = &convert;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
					vec, veclen, res->owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		ret = status;  // this is already a dlm_status
		if (ret == DLM_RECOVERING) {
			mlog(0, "node %u returned DLM_RECOVERING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_MIGRATING) {
			mlog(0, "node %u returned DLM_MIGRATING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_FORWARD) {
			mlog(0, "node %u returned DLM_FORWARD from convert "
			     "message!\n", res->owner);
		} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
			dlm_error(ret);
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			/* instead of logging the same network error over
			 * and over, sleep here and wait for the heartbeat
			 * to notice the node is dead. times out after 5s. */
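			/* returning DLM_RECOVERING below also lets the
			 * caller (dlmconvert_remote) revert the pending
			 * convert rather than leave the lock stranded on
			 * the converting queue */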
			dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from convert message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/* handler for DLM_CONVERT_LOCK_MSG on master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
 *          status from __dlmconvert_master
 */
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	struct dlm_lockstatus *lksb;
	enum dlm_status status = DLM_NORMAL;
	u32 flags;
	int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
		status = DLM_IVBUFLEN;
		dlm_error(status);
		goto leave;
	}

	flags = be32_to_cpu(cnv->flags);

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	     (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		status = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL) {
		spin_unlock(&res->spinlock);
		dlm_error(status);
		goto leave;
	}
	list_for_each(iter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.cookie == cnv->cookie &&
		    lock->ml.node == cnv->node_idx) {
			dlm_lock_get(lock);
			break;
		}
		lock = NULL;
	}
	spin_unlock(&res->spinlock);
	if (!lock) {
		status = DLM_IVLOCKID;
		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
		     "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
		__dlm_print_one_lock_resource(res);
		goto leave;
	}

	/* found the lock */
	lksb = lock->lksb;

	/* see if caller needed to get/put lvb */
	if (flags & LKM_PUT_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
	} else if (flags & LKM_GET_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_GET_LVB;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status == DLM_NORMAL) {
		__dlm_lockres_reserve_ast(res);
		ast_reserved = 1;
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
		status = __dlmconvert_master(dlm, res, lock, flags,
					     cnv->requested_type,
					     &call_ast, &kick_thread);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		wake = 1;
	}
	spin_unlock(&res->spinlock);
	if (wake)
		wake_up(&res->wq);

	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
	}

leave:
	if (lock)
		dlm_lock_put(lock);

	/* either queue the ast or release it, if reserved */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else if (ast_reserved)
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}