16714d8e8SKurt Hackel /* -*- mode: c; c-basic-offset: 8; -*- 26714d8e8SKurt Hackel * vim: noexpandtab sw=8 ts=8 sts=0: 36714d8e8SKurt Hackel * 46714d8e8SKurt Hackel * dlmthread.c 56714d8e8SKurt Hackel * 66714d8e8SKurt Hackel * standalone DLM module 76714d8e8SKurt Hackel * 86714d8e8SKurt Hackel * Copyright (C) 2004 Oracle. All rights reserved. 96714d8e8SKurt Hackel * 106714d8e8SKurt Hackel * This program is free software; you can redistribute it and/or 116714d8e8SKurt Hackel * modify it under the terms of the GNU General Public 126714d8e8SKurt Hackel * License as published by the Free Software Foundation; either 136714d8e8SKurt Hackel * version 2 of the License, or (at your option) any later version. 146714d8e8SKurt Hackel * 156714d8e8SKurt Hackel * This program is distributed in the hope that it will be useful, 166714d8e8SKurt Hackel * but WITHOUT ANY WARRANTY; without even the implied warranty of 176714d8e8SKurt Hackel * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 186714d8e8SKurt Hackel * General Public License for more details. 196714d8e8SKurt Hackel * 206714d8e8SKurt Hackel * You should have received a copy of the GNU General Public 216714d8e8SKurt Hackel * License along with this program; if not, write to the 226714d8e8SKurt Hackel * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 236714d8e8SKurt Hackel * Boston, MA 021110-1307, USA. 
246714d8e8SKurt Hackel * 256714d8e8SKurt Hackel */ 266714d8e8SKurt Hackel 276714d8e8SKurt Hackel 286714d8e8SKurt Hackel #include <linux/module.h> 296714d8e8SKurt Hackel #include <linux/fs.h> 306714d8e8SKurt Hackel #include <linux/types.h> 316714d8e8SKurt Hackel #include <linux/slab.h> 326714d8e8SKurt Hackel #include <linux/highmem.h> 336714d8e8SKurt Hackel #include <linux/utsname.h> 346714d8e8SKurt Hackel #include <linux/init.h> 356714d8e8SKurt Hackel #include <linux/sysctl.h> 366714d8e8SKurt Hackel #include <linux/random.h> 376714d8e8SKurt Hackel #include <linux/blkdev.h> 386714d8e8SKurt Hackel #include <linux/socket.h> 396714d8e8SKurt Hackel #include <linux/inet.h> 406714d8e8SKurt Hackel #include <linux/timer.h> 416714d8e8SKurt Hackel #include <linux/kthread.h> 428d79d088SKurt Hackel #include <linux/delay.h> 436714d8e8SKurt Hackel 446714d8e8SKurt Hackel 456714d8e8SKurt Hackel #include "cluster/heartbeat.h" 466714d8e8SKurt Hackel #include "cluster/nodemanager.h" 476714d8e8SKurt Hackel #include "cluster/tcp.h" 486714d8e8SKurt Hackel 496714d8e8SKurt Hackel #include "dlmapi.h" 506714d8e8SKurt Hackel #include "dlmcommon.h" 516714d8e8SKurt Hackel #include "dlmdomain.h" 526714d8e8SKurt Hackel 536714d8e8SKurt Hackel #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) 546714d8e8SKurt Hackel #include "cluster/masklog.h" 556714d8e8SKurt Hackel 566714d8e8SKurt Hackel static int dlm_thread(void *data); 576714d8e8SKurt Hackel static void dlm_flush_asts(struct dlm_ctxt *dlm); 586714d8e8SKurt Hackel 596714d8e8SKurt Hackel #define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) 606714d8e8SKurt Hackel 616714d8e8SKurt Hackel /* will exit holding res->spinlock, but may drop in function */ 626714d8e8SKurt Hackel /* waits until flags are cleared on res->state */ 636714d8e8SKurt Hackel void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags) 646714d8e8SKurt Hackel { 656714d8e8SKurt Hackel DECLARE_WAITQUEUE(wait, current); 666714d8e8SKurt Hackel 
676714d8e8SKurt Hackel assert_spin_locked(&res->spinlock); 686714d8e8SKurt Hackel 696714d8e8SKurt Hackel add_wait_queue(&res->wq, &wait); 706714d8e8SKurt Hackel repeat: 716714d8e8SKurt Hackel set_current_state(TASK_UNINTERRUPTIBLE); 726714d8e8SKurt Hackel if (res->state & flags) { 736714d8e8SKurt Hackel spin_unlock(&res->spinlock); 746714d8e8SKurt Hackel schedule(); 756714d8e8SKurt Hackel spin_lock(&res->spinlock); 766714d8e8SKurt Hackel goto repeat; 776714d8e8SKurt Hackel } 786714d8e8SKurt Hackel remove_wait_queue(&res->wq, &wait); 795c2c9d38SMilind Arun Choudhary __set_current_state(TASK_RUNNING); 806714d8e8SKurt Hackel } 816714d8e8SKurt Hackel 82ba2bf218SKurt Hackel int __dlm_lockres_has_locks(struct dlm_lock_resource *res) 836714d8e8SKurt Hackel { 846714d8e8SKurt Hackel if (list_empty(&res->granted) && 856714d8e8SKurt Hackel list_empty(&res->converting) && 86ba2bf218SKurt Hackel list_empty(&res->blocked)) 87ba2bf218SKurt Hackel return 0; 886714d8e8SKurt Hackel return 1; 89ba2bf218SKurt Hackel } 90ba2bf218SKurt Hackel 91ba2bf218SKurt Hackel /* "unused": the lockres has no locks, is not on the dirty list, 92ba2bf218SKurt Hackel * has no inflight locks (in the gap between mastery and acquiring 93ba2bf218SKurt Hackel * the first lock), and has no bits in its refmap. 94ba2bf218SKurt Hackel * truly ready to be freed. 
*/ 95ba2bf218SKurt Hackel int __dlm_lockres_unused(struct dlm_lock_resource *res) 96ba2bf218SKurt Hackel { 97ba2bf218SKurt Hackel if (!__dlm_lockres_has_locks(res) && 98ddc09c8dSKurt Hackel (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { 99ba2bf218SKurt Hackel /* try not to scan the bitmap unless the first two 100ba2bf218SKurt Hackel * conditions are already true */ 101ba2bf218SKurt Hackel int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 102ba2bf218SKurt Hackel if (bit >= O2NM_MAX_NODES) { 103ba2bf218SKurt Hackel /* since the bit for dlm->node_num is not 104ba2bf218SKurt Hackel * set, inflight_locks better be zero */ 105ba2bf218SKurt Hackel BUG_ON(res->inflight_locks != 0); 106ba2bf218SKurt Hackel return 1; 107ba2bf218SKurt Hackel } 108ba2bf218SKurt Hackel } 1096714d8e8SKurt Hackel return 0; 1106714d8e8SKurt Hackel } 1116714d8e8SKurt Hackel 1126714d8e8SKurt Hackel 1136714d8e8SKurt Hackel /* Call whenever you may have added or deleted something from one of 1146714d8e8SKurt Hackel * the lockres queue's. This will figure out whether it belongs on the 1156714d8e8SKurt Hackel * unused list or not and does the appropriate thing. 
*/ 1166714d8e8SKurt Hackel void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, 1176714d8e8SKurt Hackel struct dlm_lock_resource *res) 1186714d8e8SKurt Hackel { 1196714d8e8SKurt Hackel mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 1206714d8e8SKurt Hackel 1216714d8e8SKurt Hackel assert_spin_locked(&dlm->spinlock); 1226714d8e8SKurt Hackel assert_spin_locked(&res->spinlock); 1236714d8e8SKurt Hackel 1246714d8e8SKurt Hackel if (__dlm_lockres_unused(res)){ 1256714d8e8SKurt Hackel if (list_empty(&res->purge)) { 126ba2bf218SKurt Hackel mlog(0, "putting lockres %.*s:%p onto purge list\n", 127ba2bf218SKurt Hackel res->lockname.len, res->lockname.name, res); 1286714d8e8SKurt Hackel 1296714d8e8SKurt Hackel res->last_used = jiffies; 130ba2bf218SKurt Hackel dlm_lockres_get(res); 1316714d8e8SKurt Hackel list_add_tail(&res->purge, &dlm->purge_list); 1326714d8e8SKurt Hackel dlm->purge_count++; 1336714d8e8SKurt Hackel } 1346714d8e8SKurt Hackel } else if (!list_empty(&res->purge)) { 135ba2bf218SKurt Hackel mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", 136ba2bf218SKurt Hackel res->lockname.len, res->lockname.name, res, res->owner); 1376714d8e8SKurt Hackel 1386714d8e8SKurt Hackel list_del_init(&res->purge); 139ba2bf218SKurt Hackel dlm_lockres_put(res); 1406714d8e8SKurt Hackel dlm->purge_count--; 1416714d8e8SKurt Hackel } 1426714d8e8SKurt Hackel } 1436714d8e8SKurt Hackel 1446714d8e8SKurt Hackel void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, 1456714d8e8SKurt Hackel struct dlm_lock_resource *res) 1466714d8e8SKurt Hackel { 1476714d8e8SKurt Hackel mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 1486714d8e8SKurt Hackel spin_lock(&dlm->spinlock); 1496714d8e8SKurt Hackel spin_lock(&res->spinlock); 1506714d8e8SKurt Hackel 1516714d8e8SKurt Hackel __dlm_lockres_calc_usage(dlm, res); 1526714d8e8SKurt Hackel 1536714d8e8SKurt Hackel spin_unlock(&res->spinlock); 1546714d8e8SKurt Hackel spin_unlock(&dlm->spinlock); 1556714d8e8SKurt Hackel } 
1566714d8e8SKurt Hackel 157faf0ec9fSAdrian Bunk static int dlm_purge_lockres(struct dlm_ctxt *dlm, 158faf0ec9fSAdrian Bunk struct dlm_lock_resource *res) 1596714d8e8SKurt Hackel { 1606714d8e8SKurt Hackel int master; 161ba2bf218SKurt Hackel int ret = 0; 1626714d8e8SKurt Hackel 163ba2bf218SKurt Hackel spin_lock(&res->spinlock); 164ba2bf218SKurt Hackel if (!__dlm_lockres_unused(res)) { 165ba2bf218SKurt Hackel spin_unlock(&res->spinlock); 166ba2bf218SKurt Hackel mlog(0, "%s:%.*s: tried to purge but not unused\n", 167ba2bf218SKurt Hackel dlm->name, res->lockname.len, res->lockname.name); 168ba2bf218SKurt Hackel return -ENOTEMPTY; 169ba2bf218SKurt Hackel } 170ba2bf218SKurt Hackel master = (res->owner == dlm->node_num); 1716714d8e8SKurt Hackel if (!master) 172ba2bf218SKurt Hackel res->state |= DLM_LOCK_RES_DROPPING_REF; 173ba2bf218SKurt Hackel spin_unlock(&res->spinlock); 1746714d8e8SKurt Hackel 175ba2bf218SKurt Hackel mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, 176ba2bf218SKurt Hackel res->lockname.name, master); 177ba2bf218SKurt Hackel 178ba2bf218SKurt Hackel if (!master) { 179c824c3c7SSunil Mushran /* drop spinlock... 
retake below */ 180c824c3c7SSunil Mushran spin_unlock(&dlm->spinlock); 181c824c3c7SSunil Mushran 1823b8118cfSKurt Hackel spin_lock(&res->spinlock); 1833b8118cfSKurt Hackel /* This ensures that clear refmap is sent after the set */ 1847dc102b7SSunil Mushran __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 1853b8118cfSKurt Hackel spin_unlock(&res->spinlock); 186c824c3c7SSunil Mushran 187ba2bf218SKurt Hackel /* clear our bit from the master's refmap, ignore errors */ 188ba2bf218SKurt Hackel ret = dlm_drop_lockres_ref(dlm, res); 189ba2bf218SKurt Hackel if (ret < 0) { 190ba2bf218SKurt Hackel mlog_errno(ret); 191ba2bf218SKurt Hackel if (!dlm_is_host_down(ret)) 1926714d8e8SKurt Hackel BUG(); 1936714d8e8SKurt Hackel } 194ba2bf218SKurt Hackel mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", 195ba2bf218SKurt Hackel dlm->name, res->lockname.len, res->lockname.name, ret); 1966714d8e8SKurt Hackel spin_lock(&dlm->spinlock); 197ba2bf218SKurt Hackel } 1986714d8e8SKurt Hackel 199ba2bf218SKurt Hackel if (!list_empty(&res->purge)) { 200ba2bf218SKurt Hackel mlog(0, "removing lockres %.*s:%p from purgelist, " 201ba2bf218SKurt Hackel "master = %d\n", res->lockname.len, res->lockname.name, 202ba2bf218SKurt Hackel res, master); 203ba2bf218SKurt Hackel list_del_init(&res->purge); 204ba2bf218SKurt Hackel dlm_lockres_put(res); 2056714d8e8SKurt Hackel dlm->purge_count--; 2066714d8e8SKurt Hackel } 207ba2bf218SKurt Hackel __dlm_unhash_lockres(res); 208ba2bf218SKurt Hackel 209ba2bf218SKurt Hackel /* lockres is not in the hash now. drop the flag and wake up 210ba2bf218SKurt Hackel * any processes waiting in dlm_get_lock_resource. 
*/ 211ba2bf218SKurt Hackel if (!master) { 212ba2bf218SKurt Hackel spin_lock(&res->spinlock); 213ba2bf218SKurt Hackel res->state &= ~DLM_LOCK_RES_DROPPING_REF; 214ba2bf218SKurt Hackel spin_unlock(&res->spinlock); 215ba2bf218SKurt Hackel wake_up(&res->wq); 2166714d8e8SKurt Hackel } 217ba2bf218SKurt Hackel return 0; 2188b219809SKurt Hackel } 2198b219809SKurt Hackel 2206714d8e8SKurt Hackel static void dlm_run_purge_list(struct dlm_ctxt *dlm, 2216714d8e8SKurt Hackel int purge_now) 2226714d8e8SKurt Hackel { 2236714d8e8SKurt Hackel unsigned int run_max, unused; 2246714d8e8SKurt Hackel unsigned long purge_jiffies; 2256714d8e8SKurt Hackel struct dlm_lock_resource *lockres; 2266714d8e8SKurt Hackel 2276714d8e8SKurt Hackel spin_lock(&dlm->spinlock); 2286714d8e8SKurt Hackel run_max = dlm->purge_count; 2296714d8e8SKurt Hackel 2306714d8e8SKurt Hackel while(run_max && !list_empty(&dlm->purge_list)) { 2316714d8e8SKurt Hackel run_max--; 2326714d8e8SKurt Hackel 2336714d8e8SKurt Hackel lockres = list_entry(dlm->purge_list.next, 2346714d8e8SKurt Hackel struct dlm_lock_resource, purge); 2356714d8e8SKurt Hackel 2366714d8e8SKurt Hackel /* Status of the lockres *might* change so double 2376714d8e8SKurt Hackel * check. If the lockres is unused, holding the dlm 2386714d8e8SKurt Hackel * spinlock will prevent people from getting and more 2396714d8e8SKurt Hackel * refs on it -- there's no need to keep the lockres 2406714d8e8SKurt Hackel * spinlock. 
*/ 2416714d8e8SKurt Hackel spin_lock(&lockres->spinlock); 2426714d8e8SKurt Hackel unused = __dlm_lockres_unused(lockres); 2436714d8e8SKurt Hackel spin_unlock(&lockres->spinlock); 2446714d8e8SKurt Hackel 2456714d8e8SKurt Hackel if (!unused) 2466714d8e8SKurt Hackel continue; 2476714d8e8SKurt Hackel 2486714d8e8SKurt Hackel purge_jiffies = lockres->last_used + 2496714d8e8SKurt Hackel msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); 2506714d8e8SKurt Hackel 2516714d8e8SKurt Hackel /* Make sure that we want to be processing this guy at 2526714d8e8SKurt Hackel * this time. */ 2536714d8e8SKurt Hackel if (!purge_now && time_after(purge_jiffies, jiffies)) { 2546714d8e8SKurt Hackel /* Since resources are added to the purge list 2556714d8e8SKurt Hackel * in tail order, we can stop at the first 2566714d8e8SKurt Hackel * unpurgable resource -- anyone added after 2576714d8e8SKurt Hackel * him will have a greater last_used value */ 2586714d8e8SKurt Hackel break; 2596714d8e8SKurt Hackel } 2606714d8e8SKurt Hackel 26178062cb2SSunil Mushran dlm_lockres_get(lockres); 2626714d8e8SKurt Hackel 2636714d8e8SKurt Hackel /* This may drop and reacquire the dlm spinlock if it 2646714d8e8SKurt Hackel * has to do migration. 
*/ 265ba2bf218SKurt Hackel if (dlm_purge_lockres(dlm, lockres)) 266ba2bf218SKurt Hackel BUG(); 26778062cb2SSunil Mushran 2683fca0894SSunil Mushran dlm_lockres_put(lockres); 2696714d8e8SKurt Hackel 2706714d8e8SKurt Hackel /* Avoid adding any scheduling latencies */ 2716714d8e8SKurt Hackel cond_resched_lock(&dlm->spinlock); 2726714d8e8SKurt Hackel } 2736714d8e8SKurt Hackel 2746714d8e8SKurt Hackel spin_unlock(&dlm->spinlock); 2756714d8e8SKurt Hackel } 2766714d8e8SKurt Hackel 2776714d8e8SKurt Hackel static void dlm_shuffle_lists(struct dlm_ctxt *dlm, 2786714d8e8SKurt Hackel struct dlm_lock_resource *res) 2796714d8e8SKurt Hackel { 2806714d8e8SKurt Hackel struct dlm_lock *lock, *target; 2816714d8e8SKurt Hackel struct list_head *iter; 2826714d8e8SKurt Hackel struct list_head *head; 2836714d8e8SKurt Hackel int can_grant = 1; 2846714d8e8SKurt Hackel 2856714d8e8SKurt Hackel //mlog(0, "res->lockname.len=%d\n", res->lockname.len); 2866714d8e8SKurt Hackel //mlog(0, "res->lockname.name=%p\n", res->lockname.name); 2876714d8e8SKurt Hackel //mlog(0, "shuffle res %.*s\n", res->lockname.len, 2886714d8e8SKurt Hackel // res->lockname.name); 2896714d8e8SKurt Hackel 2906714d8e8SKurt Hackel /* because this function is called with the lockres 2916714d8e8SKurt Hackel * spinlock, and because we know that it is not migrating/ 2926714d8e8SKurt Hackel * recovering/in-progress, it is fine to reserve asts and 2936714d8e8SKurt Hackel * basts right before queueing them all throughout */ 2946714d8e8SKurt Hackel assert_spin_locked(&res->spinlock); 2956714d8e8SKurt Hackel BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| 2966714d8e8SKurt Hackel DLM_LOCK_RES_RECOVERING| 2976714d8e8SKurt Hackel DLM_LOCK_RES_IN_PROGRESS))); 2986714d8e8SKurt Hackel 2996714d8e8SKurt Hackel converting: 3006714d8e8SKurt Hackel if (list_empty(&res->converting)) 3016714d8e8SKurt Hackel goto blocked; 3026714d8e8SKurt Hackel mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, 3036714d8e8SKurt Hackel 
res->lockname.name); 3046714d8e8SKurt Hackel 3056714d8e8SKurt Hackel target = list_entry(res->converting.next, struct dlm_lock, list); 3066714d8e8SKurt Hackel if (target->ml.convert_type == LKM_IVMODE) { 3076714d8e8SKurt Hackel mlog(ML_ERROR, "%.*s: converting a lock with no " 3086714d8e8SKurt Hackel "convert_type!\n", res->lockname.len, res->lockname.name); 3096714d8e8SKurt Hackel BUG(); 3106714d8e8SKurt Hackel } 3116714d8e8SKurt Hackel head = &res->granted; 3126714d8e8SKurt Hackel list_for_each(iter, head) { 3136714d8e8SKurt Hackel lock = list_entry(iter, struct dlm_lock, list); 3146714d8e8SKurt Hackel if (lock==target) 3156714d8e8SKurt Hackel continue; 3166714d8e8SKurt Hackel if (!dlm_lock_compatible(lock->ml.type, 3176714d8e8SKurt Hackel target->ml.convert_type)) { 3186714d8e8SKurt Hackel can_grant = 0; 3196714d8e8SKurt Hackel /* queue the BAST if not already */ 3206714d8e8SKurt Hackel if (lock->ml.highest_blocked == LKM_IVMODE) { 3216714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 3226714d8e8SKurt Hackel dlm_queue_bast(dlm, lock); 3236714d8e8SKurt Hackel } 3246714d8e8SKurt Hackel /* update the highest_blocked if needed */ 3256714d8e8SKurt Hackel if (lock->ml.highest_blocked < target->ml.convert_type) 3266714d8e8SKurt Hackel lock->ml.highest_blocked = 3276714d8e8SKurt Hackel target->ml.convert_type; 3286714d8e8SKurt Hackel } 3296714d8e8SKurt Hackel } 3306714d8e8SKurt Hackel head = &res->converting; 3316714d8e8SKurt Hackel list_for_each(iter, head) { 3326714d8e8SKurt Hackel lock = list_entry(iter, struct dlm_lock, list); 3336714d8e8SKurt Hackel if (lock==target) 3346714d8e8SKurt Hackel continue; 3356714d8e8SKurt Hackel if (!dlm_lock_compatible(lock->ml.type, 3366714d8e8SKurt Hackel target->ml.convert_type)) { 3376714d8e8SKurt Hackel can_grant = 0; 3386714d8e8SKurt Hackel if (lock->ml.highest_blocked == LKM_IVMODE) { 3396714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 3406714d8e8SKurt Hackel dlm_queue_bast(dlm, lock); 3416714d8e8SKurt Hackel } 
3426714d8e8SKurt Hackel if (lock->ml.highest_blocked < target->ml.convert_type) 3436714d8e8SKurt Hackel lock->ml.highest_blocked = 3446714d8e8SKurt Hackel target->ml.convert_type; 3456714d8e8SKurt Hackel } 3466714d8e8SKurt Hackel } 3476714d8e8SKurt Hackel 3486714d8e8SKurt Hackel /* we can convert the lock */ 3496714d8e8SKurt Hackel if (can_grant) { 3506714d8e8SKurt Hackel spin_lock(&target->spinlock); 3516714d8e8SKurt Hackel BUG_ON(target->ml.highest_blocked != LKM_IVMODE); 3526714d8e8SKurt Hackel 3536714d8e8SKurt Hackel mlog(0, "calling ast for converting lock: %.*s, have: %d, " 3546714d8e8SKurt Hackel "granting: %d, node: %u\n", res->lockname.len, 3556714d8e8SKurt Hackel res->lockname.name, target->ml.type, 3566714d8e8SKurt Hackel target->ml.convert_type, target->ml.node); 3576714d8e8SKurt Hackel 3586714d8e8SKurt Hackel target->ml.type = target->ml.convert_type; 3596714d8e8SKurt Hackel target->ml.convert_type = LKM_IVMODE; 360f116629dSAkinobu Mita list_move_tail(&target->list, &res->granted); 3616714d8e8SKurt Hackel 3626714d8e8SKurt Hackel BUG_ON(!target->lksb); 3636714d8e8SKurt Hackel target->lksb->status = DLM_NORMAL; 3646714d8e8SKurt Hackel 3656714d8e8SKurt Hackel spin_unlock(&target->spinlock); 3666714d8e8SKurt Hackel 3676714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 3686714d8e8SKurt Hackel dlm_queue_ast(dlm, target); 3696714d8e8SKurt Hackel /* go back and check for more */ 3706714d8e8SKurt Hackel goto converting; 3716714d8e8SKurt Hackel } 3726714d8e8SKurt Hackel 3736714d8e8SKurt Hackel blocked: 3746714d8e8SKurt Hackel if (list_empty(&res->blocked)) 3756714d8e8SKurt Hackel goto leave; 3766714d8e8SKurt Hackel target = list_entry(res->blocked.next, struct dlm_lock, list); 3776714d8e8SKurt Hackel 3786714d8e8SKurt Hackel head = &res->granted; 3796714d8e8SKurt Hackel list_for_each(iter, head) { 3806714d8e8SKurt Hackel lock = list_entry(iter, struct dlm_lock, list); 3816714d8e8SKurt Hackel if (lock==target) 3826714d8e8SKurt Hackel continue; 3836714d8e8SKurt 
Hackel if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { 3846714d8e8SKurt Hackel can_grant = 0; 3856714d8e8SKurt Hackel if (lock->ml.highest_blocked == LKM_IVMODE) { 3866714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 3876714d8e8SKurt Hackel dlm_queue_bast(dlm, lock); 3886714d8e8SKurt Hackel } 3896714d8e8SKurt Hackel if (lock->ml.highest_blocked < target->ml.type) 3906714d8e8SKurt Hackel lock->ml.highest_blocked = target->ml.type; 3916714d8e8SKurt Hackel } 3926714d8e8SKurt Hackel } 3936714d8e8SKurt Hackel 3946714d8e8SKurt Hackel head = &res->converting; 3956714d8e8SKurt Hackel list_for_each(iter, head) { 3966714d8e8SKurt Hackel lock = list_entry(iter, struct dlm_lock, list); 3976714d8e8SKurt Hackel if (lock==target) 3986714d8e8SKurt Hackel continue; 3996714d8e8SKurt Hackel if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { 4006714d8e8SKurt Hackel can_grant = 0; 4016714d8e8SKurt Hackel if (lock->ml.highest_blocked == LKM_IVMODE) { 4026714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 4036714d8e8SKurt Hackel dlm_queue_bast(dlm, lock); 4046714d8e8SKurt Hackel } 4056714d8e8SKurt Hackel if (lock->ml.highest_blocked < target->ml.type) 4066714d8e8SKurt Hackel lock->ml.highest_blocked = target->ml.type; 4076714d8e8SKurt Hackel } 4086714d8e8SKurt Hackel } 4096714d8e8SKurt Hackel 4106714d8e8SKurt Hackel /* we can grant the blocked lock (only 4116714d8e8SKurt Hackel * possible if converting list empty) */ 4126714d8e8SKurt Hackel if (can_grant) { 4136714d8e8SKurt Hackel spin_lock(&target->spinlock); 4146714d8e8SKurt Hackel BUG_ON(target->ml.highest_blocked != LKM_IVMODE); 4156714d8e8SKurt Hackel 4166714d8e8SKurt Hackel mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " 4176714d8e8SKurt Hackel "node: %u\n", res->lockname.len, res->lockname.name, 4186714d8e8SKurt Hackel target->ml.type, target->ml.node); 4196714d8e8SKurt Hackel 4206714d8e8SKurt Hackel // target->ml.type is already correct 421f116629dSAkinobu Mita list_move_tail(&target->list, 
&res->granted); 4226714d8e8SKurt Hackel 4236714d8e8SKurt Hackel BUG_ON(!target->lksb); 4246714d8e8SKurt Hackel target->lksb->status = DLM_NORMAL; 4256714d8e8SKurt Hackel 4266714d8e8SKurt Hackel spin_unlock(&target->spinlock); 4276714d8e8SKurt Hackel 4286714d8e8SKurt Hackel __dlm_lockres_reserve_ast(res); 4296714d8e8SKurt Hackel dlm_queue_ast(dlm, target); 4306714d8e8SKurt Hackel /* go back and check for more */ 4316714d8e8SKurt Hackel goto converting; 4326714d8e8SKurt Hackel } 4336714d8e8SKurt Hackel 4346714d8e8SKurt Hackel leave: 4356714d8e8SKurt Hackel return; 4366714d8e8SKurt Hackel } 4376714d8e8SKurt Hackel 4386714d8e8SKurt Hackel /* must have NO locks when calling this with res !=NULL * */ 4396714d8e8SKurt Hackel void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 4406714d8e8SKurt Hackel { 4416714d8e8SKurt Hackel mlog_entry("dlm=%p, res=%p\n", dlm, res); 4426714d8e8SKurt Hackel if (res) { 4436714d8e8SKurt Hackel spin_lock(&dlm->spinlock); 4446714d8e8SKurt Hackel spin_lock(&res->spinlock); 4456714d8e8SKurt Hackel __dlm_dirty_lockres(dlm, res); 4466714d8e8SKurt Hackel spin_unlock(&res->spinlock); 4476714d8e8SKurt Hackel spin_unlock(&dlm->spinlock); 4486714d8e8SKurt Hackel } 4496714d8e8SKurt Hackel wake_up(&dlm->dlm_thread_wq); 4506714d8e8SKurt Hackel } 4516714d8e8SKurt Hackel 4526714d8e8SKurt Hackel void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 4536714d8e8SKurt Hackel { 4546714d8e8SKurt Hackel mlog_entry("dlm=%p, res=%p\n", dlm, res); 4556714d8e8SKurt Hackel 4566714d8e8SKurt Hackel assert_spin_locked(&dlm->spinlock); 4576714d8e8SKurt Hackel assert_spin_locked(&res->spinlock); 4586714d8e8SKurt Hackel 4596714d8e8SKurt Hackel /* don't shuffle secondary queues */ 460ddc09c8dSKurt Hackel if ((res->owner == dlm->node_num)) { 461ddc09c8dSKurt Hackel if (res->state & (DLM_LOCK_RES_MIGRATING | 462ddc09c8dSKurt Hackel DLM_LOCK_RES_BLOCK_DIRTY)) 463ddc09c8dSKurt Hackel return; 464ddc09c8dSKurt Hackel 465ddc09c8dSKurt 
Hackel if (list_empty(&res->dirty)) { 4666ff06a93SKurt Hackel /* ref for dirty_list */ 4676ff06a93SKurt Hackel dlm_lockres_get(res); 4686714d8e8SKurt Hackel list_add_tail(&res->dirty, &dlm->dirty_list); 4696714d8e8SKurt Hackel res->state |= DLM_LOCK_RES_DIRTY; 4706714d8e8SKurt Hackel } 4716714d8e8SKurt Hackel } 472ddc09c8dSKurt Hackel } 4736714d8e8SKurt Hackel 4746714d8e8SKurt Hackel 4756714d8e8SKurt Hackel /* Launch the NM thread for the mounted volume */ 4766714d8e8SKurt Hackel int dlm_launch_thread(struct dlm_ctxt *dlm) 4776714d8e8SKurt Hackel { 4786714d8e8SKurt Hackel mlog(0, "starting dlm thread...\n"); 4796714d8e8SKurt Hackel 4806714d8e8SKurt Hackel dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); 4816714d8e8SKurt Hackel if (IS_ERR(dlm->dlm_thread_task)) { 4826714d8e8SKurt Hackel mlog_errno(PTR_ERR(dlm->dlm_thread_task)); 4836714d8e8SKurt Hackel dlm->dlm_thread_task = NULL; 4846714d8e8SKurt Hackel return -EINVAL; 4856714d8e8SKurt Hackel } 4866714d8e8SKurt Hackel 4876714d8e8SKurt Hackel return 0; 4886714d8e8SKurt Hackel } 4896714d8e8SKurt Hackel 4906714d8e8SKurt Hackel void dlm_complete_thread(struct dlm_ctxt *dlm) 4916714d8e8SKurt Hackel { 4926714d8e8SKurt Hackel if (dlm->dlm_thread_task) { 4936714d8e8SKurt Hackel mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); 4946714d8e8SKurt Hackel kthread_stop(dlm->dlm_thread_task); 4956714d8e8SKurt Hackel dlm->dlm_thread_task = NULL; 4966714d8e8SKurt Hackel } 4976714d8e8SKurt Hackel } 4986714d8e8SKurt Hackel 4996714d8e8SKurt Hackel static int dlm_dirty_list_empty(struct dlm_ctxt *dlm) 5006714d8e8SKurt Hackel { 5016714d8e8SKurt Hackel int empty; 5026714d8e8SKurt Hackel 5036714d8e8SKurt Hackel spin_lock(&dlm->spinlock); 5046714d8e8SKurt Hackel empty = list_empty(&dlm->dirty_list); 5056714d8e8SKurt Hackel spin_unlock(&dlm->spinlock); 5066714d8e8SKurt Hackel 5076714d8e8SKurt Hackel return empty; 5086714d8e8SKurt Hackel } 5096714d8e8SKurt Hackel 5106714d8e8SKurt Hackel static void dlm_flush_asts(struct 
dlm_ctxt *dlm) 5116714d8e8SKurt Hackel { 5126714d8e8SKurt Hackel int ret; 5136714d8e8SKurt Hackel struct dlm_lock *lock; 5146714d8e8SKurt Hackel struct dlm_lock_resource *res; 5156714d8e8SKurt Hackel u8 hi; 5166714d8e8SKurt Hackel 5176714d8e8SKurt Hackel spin_lock(&dlm->ast_lock); 5186714d8e8SKurt Hackel while (!list_empty(&dlm->pending_asts)) { 5196714d8e8SKurt Hackel lock = list_entry(dlm->pending_asts.next, 5206714d8e8SKurt Hackel struct dlm_lock, ast_list); 5216714d8e8SKurt Hackel /* get an extra ref on lock */ 5226714d8e8SKurt Hackel dlm_lock_get(lock); 5236714d8e8SKurt Hackel res = lock->lockres; 5246714d8e8SKurt Hackel mlog(0, "delivering an ast for this lockres\n"); 5256714d8e8SKurt Hackel 5266714d8e8SKurt Hackel BUG_ON(!lock->ast_pending); 5276714d8e8SKurt Hackel 5286714d8e8SKurt Hackel /* remove from list (including ref) */ 5296714d8e8SKurt Hackel list_del_init(&lock->ast_list); 5306714d8e8SKurt Hackel dlm_lock_put(lock); 5316714d8e8SKurt Hackel spin_unlock(&dlm->ast_lock); 5326714d8e8SKurt Hackel 5336714d8e8SKurt Hackel if (lock->ml.node != dlm->node_num) { 5346714d8e8SKurt Hackel ret = dlm_do_remote_ast(dlm, res, lock); 5356714d8e8SKurt Hackel if (ret < 0) 5366714d8e8SKurt Hackel mlog_errno(ret); 5376714d8e8SKurt Hackel } else 5386714d8e8SKurt Hackel dlm_do_local_ast(dlm, res, lock); 5396714d8e8SKurt Hackel 5406714d8e8SKurt Hackel spin_lock(&dlm->ast_lock); 5416714d8e8SKurt Hackel 5426714d8e8SKurt Hackel /* possible that another ast was queued while 5436714d8e8SKurt Hackel * we were delivering the last one */ 5446714d8e8SKurt Hackel if (!list_empty(&lock->ast_list)) { 5456714d8e8SKurt Hackel mlog(0, "aha another ast got queued while " 5466714d8e8SKurt Hackel "we were finishing the last one. will " 5476714d8e8SKurt Hackel "keep the ast_pending flag set.\n"); 5486714d8e8SKurt Hackel } else 5496714d8e8SKurt Hackel lock->ast_pending = 0; 5506714d8e8SKurt Hackel 5516714d8e8SKurt Hackel /* drop the extra ref. 
5526714d8e8SKurt Hackel * this may drop it completely. */ 5536714d8e8SKurt Hackel dlm_lock_put(lock); 5546714d8e8SKurt Hackel dlm_lockres_release_ast(dlm, res); 5556714d8e8SKurt Hackel } 5566714d8e8SKurt Hackel 5576714d8e8SKurt Hackel while (!list_empty(&dlm->pending_basts)) { 5586714d8e8SKurt Hackel lock = list_entry(dlm->pending_basts.next, 5596714d8e8SKurt Hackel struct dlm_lock, bast_list); 5606714d8e8SKurt Hackel /* get an extra ref on lock */ 5616714d8e8SKurt Hackel dlm_lock_get(lock); 5626714d8e8SKurt Hackel res = lock->lockres; 5636714d8e8SKurt Hackel 5646714d8e8SKurt Hackel BUG_ON(!lock->bast_pending); 5656714d8e8SKurt Hackel 5666714d8e8SKurt Hackel /* get the highest blocked lock, and reset */ 5676714d8e8SKurt Hackel spin_lock(&lock->spinlock); 5686714d8e8SKurt Hackel BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); 5696714d8e8SKurt Hackel hi = lock->ml.highest_blocked; 5706714d8e8SKurt Hackel lock->ml.highest_blocked = LKM_IVMODE; 5716714d8e8SKurt Hackel spin_unlock(&lock->spinlock); 5726714d8e8SKurt Hackel 5736714d8e8SKurt Hackel /* remove from list (including ref) */ 5746714d8e8SKurt Hackel list_del_init(&lock->bast_list); 5756714d8e8SKurt Hackel dlm_lock_put(lock); 5766714d8e8SKurt Hackel spin_unlock(&dlm->ast_lock); 5776714d8e8SKurt Hackel 5786714d8e8SKurt Hackel mlog(0, "delivering a bast for this lockres " 5796714d8e8SKurt Hackel "(blocked = %d\n", hi); 5806714d8e8SKurt Hackel 5816714d8e8SKurt Hackel if (lock->ml.node != dlm->node_num) { 5826714d8e8SKurt Hackel ret = dlm_send_proxy_bast(dlm, res, lock, hi); 5836714d8e8SKurt Hackel if (ret < 0) 5846714d8e8SKurt Hackel mlog_errno(ret); 5856714d8e8SKurt Hackel } else 5866714d8e8SKurt Hackel dlm_do_local_bast(dlm, res, lock, hi); 5876714d8e8SKurt Hackel 5886714d8e8SKurt Hackel spin_lock(&dlm->ast_lock); 5896714d8e8SKurt Hackel 5906714d8e8SKurt Hackel /* possible that another bast was queued while 5916714d8e8SKurt Hackel * we were delivering the last one */ 5926714d8e8SKurt Hackel if 
(!list_empty(&lock->bast_list)) {
			mlog(0, "aha another bast got queued while "
				"we were finishing the last one.  will "
				"keep the bast_pending flag set.\n");
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}


/* idle sleep between dlm_thread work passes */
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
/* max lockreses pulled off the dirty list per pass before yielding */
#define DLM_THREAD_MAX_DIRTY  100
/* NOTE(review): not referenced in this function; presumably bounds
 * ast/bast batching in dlm_flush_asts() -- confirm at the use site */
#define DLM_THREAD_MAX_ASTS   10

/*
 * Main worker thread for a dlm domain (started via kthread; 'data' is
 * the struct dlm_ctxt *).  Each pass:
 *
 *  1) runs the purge list, purging unconditionally once the domain is
 *     shutting down,
 *  2) drains up to DLM_THREAD_MAX_DIRTY entries off dlm->dirty_list,
 *     calling dlm_shuffle_lists() on each locally-owned lockres to
 *     adjust its queues and reserve/queue AST/BAST work,
 *  3) flushes the queued asts/basts via dlm_flush_asts(),
 *
 * then sleeps until the dirty list goes non-empty, the thread is asked
 * to stop, or DLM_THREAD_TIMEOUT_MS elapses.  If the pass was cut short
 * by the MAX_DIRTY throttle it only cond_resched()s and immediately
 * starts another pass.
 *
 * Always returns 0 (the kthread exit status).
 */
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		/* invariant: dlm->spinlock is held at the top of every
		 * iteration of this inner loop and on its exit */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			/* our own ref, held across the unlocked gap below;
			 * dropped via dlm_lockres_put() after in_progress: */
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
			}
			/* only locally mastered lockreses may ever be on
			 * the dirty list */
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				mlog(0, "delaying list shuffling for in-"
				     "progress lockres %.*s, state=%d\n",
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
			     "res=%.*s\n", dlm->name,
			     res->lockname.len, res->lockname.name);

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			/* retake dlm->spinlock to restore the inner-loop
			 * invariant before touching dirty_list again */
			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			/* drop the ref taken at the top of this iteration */
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "throttling dlm_thread\n");
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}