Lines Matching +full:lock
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
57 return -ENOMEM; in dlm_init_lock_cache()
66 /* Tell us whether we can grant a new lock request.
68 * caller needs: res->spinlock
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock) in dlm_can_grant_new_lock() argument
78 list_for_each_entry(tmplock, &res->granted, list) { in dlm_can_grant_new_lock()
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
83 list_for_each_entry(tmplock, &res->converting, list) { in dlm_can_grant_new_lock()
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
86 if (!dlm_lock_compatible(tmplock->ml.convert_type, in dlm_can_grant_new_lock()
87 lock->ml.type)) in dlm_can_grant_new_lock()
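
The grant decision above hinges on dlm_lock_compatible(), which compares an already-held mode (or the target mode of a pending convert) against the requested mode. A minimal sketch of such a check, assuming only the three modes this DLM actually grants (LKM_NLMODE, LKM_PRMODE, LKM_EXMODE) and a hypothetical helper name:

/* Sketch only: a mode-compatibility check in the spirit of
 * dlm_lock_compatible().  Assumes NL/PR/EX are the only modes in play. */
static inline int lock_modes_compatible(int existing, int request)
{
        /* NL is compatible with everything */
        if (existing == LKM_NLMODE || request == LKM_NLMODE)
                return 1;
        /* EX is compatible with nothing but NL */
        if (existing == LKM_EXMODE || request == LKM_EXMODE)
                return 0;
        /* what remains is PR against PR, which is compatible */
        return 1;
}
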
94 /* performs lock creation at the lockres master site
97 * taken: takes and drops res->spinlock
103 struct dlm_lock *lock, int flags) in dlmlock_master() argument
108 mlog(0, "type=%d\n", lock->ml.type); in dlmlock_master()
110 spin_lock(&res->spinlock); in dlmlock_master()
115 lock->ml.node != dlm->node_num) { in dlmlock_master()
116 /* erf. state changed after lock was dropped. */ in dlmlock_master()
117 spin_unlock(&res->spinlock); in dlmlock_master()
124 if (dlm_can_grant_new_lock(res, lock)) { in dlmlock_master()
125 mlog(0, "I can grant this lock right away\n"); in dlmlock_master()
127 lock->lksb->status = DLM_NORMAL; in dlmlock_master()
129 dlm_lock_get(lock); in dlmlock_master()
130 list_add_tail(&lock->list, &res->granted); in dlmlock_master()
132 /* for the recovery lock, we can't allow the ast in dlmlock_master()
134 * frozen. but the recovery lock is always locked in dlmlock_master()
137 if (!dlm_is_recovery_lock(res->lockname.name, in dlmlock_master()
138 res->lockname.len)) { in dlmlock_master()
143 "node %u for reco lock\n", dlm->name, in dlmlock_master()
144 lock->ml.node); in dlmlock_master()
148 * lock right away, return DLM_NOTQUEUED */ in dlmlock_master()
151 if (dlm_is_recovery_lock(res->lockname.name, in dlmlock_master()
152 res->lockname.len)) { in dlmlock_master()
154 "node %u for reco lock\n", dlm->name, in dlmlock_master()
155 lock->ml.node); in dlmlock_master()
159 dlm_lock_get(lock); in dlmlock_master()
160 list_add_tail(&lock->list, &res->blocked); in dlmlock_master()
165 spin_unlock(&res->spinlock); in dlmlock_master()
166 wake_up(&res->wq); in dlmlock_master()
170 dlm_queue_ast(dlm, lock); in dlmlock_master()
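
The recovery-lock special cases in dlmlock_master() key off the lockres name. A check like dlm_is_recovery_lock() can be as simple as comparing the name against the well-known "$RECOVERY" string mentioned later in this file; the constant and helper names below are assumptions for illustration:

/* Sketch: identify the $RECOVERY lockres by name.
 * RECOVERY_LOCK_NAME is an illustrative stand-in for the real constant. */
#define RECOVERY_LOCK_NAME      "$RECOVERY"

static inline int is_recovery_lock(const char *name, int namelen)
{
        return namelen == (int)strlen(RECOVERY_LOCK_NAME) &&
               !memcmp(name, RECOVERY_LOCK_NAME, namelen);
}
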
182 struct dlm_lock *lock) in dlm_revert_pending_lock() argument
185 list_del_init(&lock->list); in dlm_revert_pending_lock()
186 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; in dlm_revert_pending_lock()
193 * taken: takes and drops res->spinlock
199 struct dlm_lock *lock, int flags) in dlmlock_remote() argument
205 lock->ml.type, res->lockname.len, in dlmlock_remote()
206 res->lockname.name, flags); in dlmlock_remote()
212 spin_lock(&res->spinlock); in dlmlock_remote()
214 if (res->owner == dlm->node_num) { in dlmlock_remote()
215 spin_unlock(&res->spinlock); in dlmlock_remote()
218 res->state |= DLM_LOCK_RES_IN_PROGRESS; in dlmlock_remote()
220 /* add lock to local (secondary) queue */ in dlmlock_remote()
221 dlm_lock_get(lock); in dlmlock_remote()
222 list_add_tail(&lock->list, &res->blocked); in dlmlock_remote()
223 lock->lock_pending = 1; in dlmlock_remote()
224 spin_unlock(&res->spinlock); in dlmlock_remote()
226 /* spec seems to say that you will get DLM_NORMAL when the lock in dlmlock_remote()
228 status = dlm_send_remote_lock_request(dlm, res, lock, flags); in dlmlock_remote()
230 spin_lock(&res->spinlock); in dlmlock_remote()
231 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; in dlmlock_remote()
232 lock->lock_pending = 0; in dlmlock_remote()
235 dlm_is_recovery_lock(res->lockname.name, in dlmlock_remote()
236 res->lockname.len)) { in dlmlock_remote()
237 /* recovery lock was mastered by dead node. in dlmlock_remote()
240 mlog(0, "%s: recovery lock was owned by " in dlmlock_remote()
242 dlm->name, res->owner); in dlmlock_remote()
253 dlm_revert_pending_lock(res, lock); in dlmlock_remote()
254 dlm_lock_put(lock); in dlmlock_remote()
255 } else if (dlm_is_recovery_lock(res->lockname.name, in dlmlock_remote()
256 res->lockname.len)) { in dlmlock_remote()
257 /* special case for the $RECOVERY lock. in dlmlock_remote()
259 * this lock on the proper secondary queue in dlmlock_remote()
261 mlog(0, "%s: $RECOVERY lock for this node (%u) is " in dlmlock_remote()
262 "mastered by %u; got lock, manually granting (no ast)\n", in dlmlock_remote()
263 dlm->name, dlm->node_num, res->owner); in dlmlock_remote()
264 list_move_tail(&lock->list, &res->granted); in dlmlock_remote()
266 spin_unlock(&res->spinlock); in dlmlock_remote()
271 wake_up(&res->wq); in dlmlock_remote()
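
dlmlock_remote() shows the pending-flag pattern used throughout this DLM: mark the operation in flight under res->spinlock, drop the spinlock for the blocking network call, then re-take it and clear the flag before acting on the result. A stripped-down sketch of that shape, with remote_op() standing in for dlm_send_remote_lock_request():

/* Sketch of the in-flight marking around a blocking network call. */
spin_lock(&res->spinlock);
res->state |= DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 1;
spin_unlock(&res->spinlock);

status = remote_op(dlm, res, lock, flags);      /* may sleep */

spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
/* only now inspect status and move the lock between queues */
spin_unlock(&res->spinlock);
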
276 /* for remote lock creation.
278 * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
285 struct dlm_lock *lock, int flags) in dlm_send_remote_lock_request() argument
292 create.node_idx = dlm->node_num; in dlm_send_remote_lock_request()
293 create.requested_type = lock->ml.type; in dlm_send_remote_lock_request()
294 create.cookie = lock->ml.cookie; in dlm_send_remote_lock_request()
295 create.namelen = res->lockname.len; in dlm_send_remote_lock_request()
297 memcpy(create.name, res->lockname.name, create.namelen); in dlm_send_remote_lock_request()
299 tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, in dlm_send_remote_lock_request()
300 sizeof(create), res->owner, &status); in dlm_send_remote_lock_request()
306 "currently.\n", dlm->name, create.namelen, in dlm_send_remote_lock_request()
307 create.name, res->owner); in dlm_send_remote_lock_request()
312 mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to " in dlm_send_remote_lock_request()
313 "node %u\n", dlm->name, create.namelen, create.name, in dlm_send_remote_lock_request()
314 tmpret, res->owner); in dlm_send_remote_lock_request()
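
The create message built here is a fixed-layout struct sent over o2net to the lockres owner. A rough sketch of its shape, inferred from the fields used above (exact sizes, padding and the name-buffer bound are assumptions; the real definition is struct dlm_create_lock in the dlm headers):

/* Sketch of the on-the-wire create-lock request. */
struct create_lock_msg {
        __be64 cookie;          /* lock->ml.cookie, already big-endian */
        __be32 flags;           /* LKM_* flags from the caller */
        u8     pad1;
        u8     node_idx;        /* requesting node, dlm->node_num */
        s8     requested_type;  /* LKM_NLMODE / LKM_PRMODE / LKM_EXMODE */
        u8     namelen;
        u8     name[O2NM_MAX_NAME_LEN];
};
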
324 void dlm_lock_get(struct dlm_lock *lock) in dlm_lock_get() argument
326 kref_get(&lock->lock_refs); in dlm_lock_get()
329 void dlm_lock_put(struct dlm_lock *lock) in dlm_lock_put() argument
331 kref_put(&lock->lock_refs, dlm_lock_release); in dlm_lock_put()
336 struct dlm_lock *lock; in dlm_lock_release() local
338 lock = container_of(kref, struct dlm_lock, lock_refs); in dlm_lock_release()
340 BUG_ON(!list_empty(&lock->list)); in dlm_lock_release()
341 BUG_ON(!list_empty(&lock->ast_list)); in dlm_lock_release()
342 BUG_ON(!list_empty(&lock->bast_list)); in dlm_lock_release()
343 BUG_ON(lock->ast_pending); in dlm_lock_release()
344 BUG_ON(lock->bast_pending); in dlm_lock_release()
346 dlm_lock_detach_lockres(lock); in dlm_lock_release()
348 if (lock->lksb_kernel_allocated) { in dlm_lock_release()
349 mlog(0, "freeing kernel-allocated lksb\n"); in dlm_lock_release()
350 kfree(lock->lksb); in dlm_lock_release()
352 kmem_cache_free(dlm_lock_cache, lock); in dlm_lock_release()
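
Lock lifetime follows the usual kref pattern: every queue a lock sits on holds its own reference, and dlm_lock_release() runs only when the last reference is dropped. A caller-side sketch of that discipline:

/* Sketch: each list insertion takes a reference, so removal and the
 * caller's final put can happen in either order. */
dlm_lock_get(lock);                     /* ref held by the granted list */
list_add_tail(&lock->list, &res->granted);

/* later, when the lock leaves the queue: */
list_del_init(&lock->list);
dlm_lock_put(lock);                     /* drop the list's ref */

dlm_lock_put(lock);                     /* caller's own ref; may free lock */
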
355 /* associate a lock with its lockres, getting a ref on the lockres */
356 void dlm_lock_attach_lockres(struct dlm_lock *lock, in dlm_lock_attach_lockres() argument
360 lock->lockres = res; in dlm_lock_attach_lockres()
363 /* drop ref on lockres, if there is still one associated with lock */
364 static void dlm_lock_detach_lockres(struct dlm_lock *lock) in dlm_lock_detach_lockres() argument
368 res = lock->lockres; in dlm_lock_detach_lockres()
370 lock->lockres = NULL; in dlm_lock_detach_lockres()
371 mlog(0, "removing lock's lockres reference\n"); in dlm_lock_detach_lockres()
379 INIT_LIST_HEAD(&newlock->list); in dlm_init_lock()
380 INIT_LIST_HEAD(&newlock->ast_list); in dlm_init_lock()
381 INIT_LIST_HEAD(&newlock->bast_list); in dlm_init_lock()
382 spin_lock_init(&newlock->spinlock); in dlm_init_lock()
383 newlock->ml.type = type; in dlm_init_lock()
384 newlock->ml.convert_type = LKM_IVMODE; in dlm_init_lock()
385 newlock->ml.highest_blocked = LKM_IVMODE; in dlm_init_lock()
386 newlock->ml.node = node; in dlm_init_lock()
387 newlock->ml.pad1 = 0; in dlm_init_lock()
388 newlock->ml.list = 0; in dlm_init_lock()
389 newlock->ml.flags = 0; in dlm_init_lock()
390 newlock->ast = NULL; in dlm_init_lock()
391 newlock->bast = NULL; in dlm_init_lock()
392 newlock->astdata = NULL; in dlm_init_lock()
393 newlock->ml.cookie = cpu_to_be64(cookie); in dlm_init_lock()
394 newlock->ast_pending = 0; in dlm_init_lock()
395 newlock->bast_pending = 0; in dlm_init_lock()
396 newlock->convert_pending = 0; in dlm_init_lock()
397 newlock->lock_pending = 0; in dlm_init_lock()
398 newlock->unlock_pending = 0; in dlm_init_lock()
399 newlock->cancel_pending = 0; in dlm_init_lock()
400 newlock->lksb_kernel_allocated = 0; in dlm_init_lock()
402 kref_init(&newlock->lock_refs); in dlm_init_lock()
408 struct dlm_lock *lock; in dlm_new_lock() local
411 lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS); in dlm_new_lock()
412 if (!lock) in dlm_new_lock()
416 /* zero memory only if kernel-allocated */ in dlm_new_lock()
419 kmem_cache_free(dlm_lock_cache, lock); in dlm_new_lock()
425 dlm_init_lock(lock, type, node, cookie); in dlm_new_lock()
427 lock->lksb_kernel_allocated = 1; in dlm_new_lock()
428 lock->lksb = lksb; in dlm_new_lock()
429 lksb->lockid = lock; in dlm_new_lock()
430 return lock; in dlm_new_lock()
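
dlm_new_lock() accepts a caller-supplied lksb or allocates one itself; that is the "kernel-allocated" case the comment and the lksb_kernel_allocated flag refer to. A sketch of the elided allocation branch, assuming a plain zeroed allocation of the lksb:

/* Sketch of the elided branch in dlm_new_lock(): if the caller did not
 * supply an lksb, allocate and zero one, and remember that we own it so
 * dlm_lock_release() can free it later. */
kernel_allocated = 0;
if (!lksb) {
        lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
        if (!lksb) {
                kmem_cache_free(dlm_lock_cache, lock);
                return NULL;
        }
        kernel_allocated = 1;
}

dlm_init_lock(lock, type, node, cookie);
if (kernel_allocated)
        lock->lksb_kernel_allocated = 1;
lock->lksb = lksb;
lksb->lockid = lock;
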
433 /* handler for lock creation net message
436 * taken: takes and drops res->spinlock
444 struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; in dlm_create_lock_handler()
457 name = create->name; in dlm_create_lock_handler()
458 namelen = create->namelen; in dlm_create_lock_handler()
462 "sending a create_lock message for lock %.*s!\n", in dlm_create_lock_handler()
463 dlm->name, create->node_idx, namelen, name); in dlm_create_lock_handler()
475 newlock = dlm_new_lock(create->requested_type, in dlm_create_lock_handler()
476 create->node_idx, in dlm_create_lock_handler()
477 be64_to_cpu(create->cookie), NULL); in dlm_create_lock_handler()
483 lksb = newlock->lksb; in dlm_create_lock_handler()
485 if (be32_to_cpu(create->flags) & LKM_GET_LVB) { in dlm_create_lock_handler()
486 lksb->flags |= DLM_LKSB_GET_LVB; in dlm_create_lock_handler()
497 spin_lock(&res->spinlock); in dlm_create_lock_handler()
499 spin_unlock(&res->spinlock); in dlm_create_lock_handler()
502 mlog(0, "lockres recovering/migrating/in-progress\n"); in dlm_create_lock_handler()
508 status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags)); in dlm_create_lock_handler()
523 /* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
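
As the comment says, the cookie generator packs the node number into the top byte of a 64-bit value and keeps a wrapping 56-bit per-node counter underneath. A sketch of that packing, assuming a global counter protected by its own spinlock (names here are illustrative, not the file's own):

/* Sketch: node-local cookie generation.  The top 8 bits carry the node
 * number, the low 56 bits a wrapping counter. */
static DEFINE_SPINLOCK(cookie_lock);
static u64 next_cookie = 1;

static void get_next_cookie(u8 node_num, u64 *cookie)
{
        u64 node_bits = (u64)node_num << 56;

        spin_lock(&cookie_lock);
        *cookie = next_cookie | node_bits;
        if (++next_cookie & 0xff00000000000000ull)
                next_cookie = 1;        /* counter wrapped into the node byte */
        spin_unlock(&cookie_lock);
}
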
547 struct dlm_lock *lock = NULL; in dlmlock() local
552 * lock and convert paths, especially in the retry blocks */ in dlmlock()
586 lock = lksb->lockid; in dlmlock()
587 if (!lock) { in dlmlock()
588 mlog(ML_ERROR, "NULL lock pointer in convert " in dlmlock()
593 res = lock->lockres; in dlmlock()
602 * static after the original lock call. convert requests will in dlmlock()
606 if (lock->lksb != lksb || lock->ast != ast || in dlmlock()
607 lock->bast != bast || lock->astdata != data) { in dlmlock()
612 "astdata=%p\n", lock->lksb, lock->ast, in dlmlock()
613 lock->bast, lock->astdata); in dlmlock()
619 if (res->owner == dlm->node_num) in dlmlock()
620 status = dlmconvert_master(dlm, res, lock, flags, mode); in dlmlock()
622 status = dlmconvert_remote(dlm, res, lock, flags, mode); in dlmlock()
630 "in-progress\n"); in dlmlock()
637 /* LOCK request */ in dlmlock()
650 dlm_get_next_cookie(dlm->node_num, &tmpcookie); in dlmlock()
651 lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); in dlmlock()
652 if (!lock) { in dlmlock()
660 /* find or create the lock resource */ in dlmlock()
669 mlog(0, "creating lock: lock=%p res=%p\n", lock, res); in dlmlock()
671 dlm_lock_attach_lockres(lock, res); in dlmlock()
672 lock->ast = ast; in dlmlock()
673 lock->bast = bast; in dlmlock()
674 lock->astdata = data; in dlmlock()
686 lock->lksb->flags |= DLM_LKSB_GET_LVB; in dlmlock()
690 if (res->owner == dlm->node_num) in dlmlock()
691 status = dlmlock_master(dlm, res, lock, flags); in dlmlock()
693 status = dlmlock_remote(dlm, res, lock, flags); in dlmlock()
704 dlm_wait_for_node_death(dlm, res->owner, in dlmlock()
713 spin_lock(&res->spinlock); in dlmlock()
715 spin_unlock(&res->spinlock); in dlmlock()
721 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; in dlmlock()
730 if (lock && !convert) in dlmlock()
731 dlm_lock_put(lock); in dlmlock()
733 lksb->status = status; in dlmlock()
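
From a caller's point of view, dlmlock() is asynchronous: it returns once the request is granted, queued, or rejected, and the final disposition is reported through the lksb and the AST callback. A hedged usage sketch; the lock name, callbacks, and the exact parameter order are illustrative assumptions, not taken from this listing:

/* Sketch of a dlmlock() call for a new EX lock.  my_ast/my_bast and the
 * lock name are made up for illustration; check dlmapi.h for the real
 * prototype and argument order. */
struct dlm_lockstatus lksb;
enum dlm_status status;

memset(&lksb, 0, sizeof(lksb));
status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
                 "my_lock_name", strlen("my_lock_name"),
                 my_ast, my_astdata, my_bast);
if (status == DLM_NOTQUEUED) {
        /* an incompatible lock is held and we asked not to wait */
        return -EAGAIN;
}
/* on DLM_NORMAL, my_ast() fires once the lock is actually granted */
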