/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)

/* Exits holding res->spinlock, but may drop and retake it internally.
 * Waits until the given flags are cleared from res->state. */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
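
/*
 * Illustrative caller pattern (a sketch, mirroring the use in
 * dlm_purge_lockres() below): the caller takes res->spinlock and then
 * waits for a state flag to clear, e.g.
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
 *	spin_unlock(&res->spinlock);
 *
 * Note that the wait loop drops and retakes res->spinlock around
 * schedule(), so other state may have changed by the time it returns;
 * only the requested flags are guaranteed clear at that instant.
 */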

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits set in its refmap; only then is
 * it truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	if (!__dlm_lockres_has_locks(res) &&
	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
		/* try not to scan the bitmap unless the first two
		 * conditions are already true */
		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* since the bit for dlm->node_num is not
			 * set, inflight_locks had better be zero */
			BUG_ON(res->inflight_locks != 0);
			return 1;
		}
	}
	return 0;
}
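
/*
 * Sketch of the refmap convention assumed above (see dlmcommon.h for
 * the authoritative definitions): refmap is a bitmap indexed by node
 * number, and a set bit means that node may still reference this
 * resource, e.g.
 *
 *	int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
 *	if (bit < O2NM_MAX_NODES)
 *		mlog(0, "node %d still holds a ref\n", bit);
 *
 * find_next_bit() returns its size argument (O2NM_MAX_NODES here)
 * when no bit at or past the start offset is set, which is the
 * "empty" test used in __dlm_lockres_unused().
 */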

/* Call this whenever you may have added or deleted something from one
 * of the lockres queues. It will figure out whether the resource still
 * belongs on the unused (purge) list and do the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "putting lockres %.*s:%p onto purge list\n",
			     res->lockname.len, res->lockname.name, res);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
		     res->lockname.len, res->lockname.name, res, res->owner);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}

static int dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	spin_lock(&res->spinlock);
	if (!__dlm_lockres_unused(res)) {
		mlog(0, "%s:%.*s: tried to purge but not unused\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		spin_unlock(&res->spinlock);
		BUG();
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "%s:%.*s: Delay dropref as this lockres is "
		     "being remastered\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		/* Re-add the lockres to the end of the purge list */
		if (!list_empty(&res->purge)) {
			list_del_init(&res->purge);
			list_add_tail(&res->purge, &dlm->purge_list);
		}
		spin_unlock(&res->spinlock);
		return 0;
	}

	master = (res->owner == dlm->node_num);

	if (!master)
		res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
	     res->lockname.name, master);

	if (!master) {
		/* drop spinlock...  retake below */
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n", res->lockname.len, res->lockname.name,
		     res, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}
	return 0;
}
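
/*
 * Sketch of the ordering the non-master path above relies on:
 *
 *   1. set DLM_LOCK_RES_DROPPING_REF under res->spinlock
 *   2. wait for DLM_LOCK_RES_SETREF_INPROG to clear, so any in-flight
 *      "set ref" message to the master completes first
 *   3. send the deref (dlm_drop_lockres_ref()) to clear our bit in
 *      the master's refmap
 *   4. unhash, clear DROPPING_REF, and wake waiters in
 *      dlm_get_lock_resource()
 *
 * Without step 2, a set could arrive at the master after our clear
 * and resurrect a reference to a resource we have already purged.
 */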

static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent anyone from getting any more
		 * refs on it -- there's no need to keep the lockres
		 * spinlock. */
		spin_lock(&lockres->spinlock);
		unused = __dlm_lockres_unused(lockres);
		spin_unlock(&lockres->spinlock);

		if (!unused)
			continue;

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we actually want to process this
		 * lockres right now. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anything added after
			 * it will have a greater last_used value */
			break;
		}

		dlm_lockres_get(lockres);

		/* This may drop and reacquire the dlm spinlock if it
		 * has to do migration. */
		if (dlm_purge_lockres(dlm, lockres))
			BUG();

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
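
/*
 * Worked sketch of the aging test above: last_used is stamped with
 * jiffies when the lockres goes onto the purge list (see
 * __dlm_lockres_calc_usage()), so a resource becomes purgeable only
 * once DLM_PURGE_INTERVAL_MS has elapsed, e.g.
 *
 *	unsigned long purge_jiffies = lockres->last_used +
 *		msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
 *	if (time_after(purge_jiffies, jiffies))
 *		break;	// too young; everything behind it is younger
 *
 * time_after() compares jiffies values safely across counter
 * wraparound, which a plain '>' would get wrong.
 */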

static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	struct list_head *iter;
	struct list_head *head;
	int can_grant = 1;

	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
	//	  res->lockname.name);

	/* because this function is called with the lockres spinlock
	 * held, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout */
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
	     res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%.*s: converting a lock with no "
		     "convert_type!\n", res->lockname.len, res->lockname.name);
		BUG();
	}
	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
		     "granting: %d, node: %u\n", res->lockname.len,
		     res->lockname.name, target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
		     "node: %u\n", res->lockname.len, res->lockname.name,
		     target->ml.type, target->ml.node);

		// target->ml.type is already correct
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
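
/*
 * For orientation, a sketch of the mode-compatibility rule that
 * dlm_lock_compatible() (defined in dlmcommon.h; see there for the
 * authoritative version) is assumed to implement, following the
 * usual VMS-style DLM lattice:
 *
 *	NL (no-lock)	compatible with every mode
 *	PR (read)	compatible with NL and PR
 *	EX (write)	compatible with NL only
 *
 * When a queued request is incompatible with a holder, the holder's
 * highest_blocked is raised to the strongest mode it is blocking,
 * and that mode is what the queued BAST ultimately delivers.
 */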

/* must hold NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}
}


/* Launch the dlm thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}
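
/*
 * Illustrative lifecycle (a sketch; the actual call sites live in the
 * domain setup/teardown code, e.g. dlmdomain.c):
 *
 *	if (dlm_launch_thread(dlm))
 *		goto error;
 *	...
 *	dlm_complete_thread(dlm);	// during domain teardown
 *
 * kthread_stop() both sets the should-stop flag and wakes the task,
 * then blocks until dlm_thread() observes kthread_should_stop() and
 * returns; the wait_event_interruptible_timeout() in the main loop
 * guarantees the flag is rechecked promptly.
 */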

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "delivering an ast for this lockres\n");

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "aha another ast got queued while "
			     "we were finishing the last one.  will "
			     "keep the ast_pending flag set.\n");
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "delivering a bast for this lockres "
		     "(blocked = %d)\n", hi);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "aha another bast got queued while "
			     "we were finishing the last one.  will "
			     "keep the bast_pending flag set.\n");
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
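
/*
 * Sketch of the "extra ref" pattern used in both loops above: take a
 * reference before dropping dlm->ast_lock so the lock cannot be freed
 * while its AST/BAST handler runs unlocked, then drop it once back
 * under the lock:
 *
 *	dlm_lock_get(lock);		// pin across the unlocked region
 *	list_del_init(&lock->ast_list);
 *	dlm_lock_put(lock);		// drop the list's own ref
 *	spin_unlock(&dlm->ast_lock);
 *	...deliver the ast/bast...
 *	spin_lock(&dlm->ast_lock);
 *	dlm_lock_put(lock);		// drop the pin; may free the lock
 *
 * The matching dlm_lockres_release_ast() undoes the reservation taken
 * by __dlm_lockres_reserve_ast() in dlm_shuffle_lists().
 */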


#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10

static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once
			 * we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				mlog(0, "delaying list shuffling for in-"
				     "progress lockres %.*s, state=%d\n",
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
			     "res=%.*s\n", dlm->name,
			     res->lockname.len, res->lockname.name);

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "throttling dlm_thread\n");
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}
757