xref: /openbmc/linux/fs/dlm/lock.c (revision 278002edb19bce2c628fafb0af936e77000f3a5b)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /******************************************************************************
3  *******************************************************************************
4  **
5  **  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
6  **
7  **
8  *******************************************************************************
9  ******************************************************************************/
10  
11  /* Central locking logic has four stages:
12  
13     dlm_lock()
14     dlm_unlock()
15  
16     request_lock(ls, lkb)
17     convert_lock(ls, lkb)
18     unlock_lock(ls, lkb)
19     cancel_lock(ls, lkb)
20  
21     _request_lock(r, lkb)
22     _convert_lock(r, lkb)
23     _unlock_lock(r, lkb)
24     _cancel_lock(r, lkb)
25  
26     do_request(r, lkb)
27     do_convert(r, lkb)
28     do_unlock(r, lkb)
29     do_cancel(r, lkb)
30  
31     Stage 1 (lock, unlock) is mainly about checking input args and
32     splitting into one of the four main operations:
33  
34         dlm_lock          = request_lock
35         dlm_lock+CONVERT  = convert_lock
36         dlm_unlock        = unlock_lock
37         dlm_unlock+CANCEL = cancel_lock
38  
39     Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
40     provided to the next stage.
41  
42     Stage 3, _xxxx_lock(), determines if the operation is local or remote.
43     When remote, it calls send_xxxx(), when local it calls do_xxxx().
44  
45     Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
46     given rsb and lkb and queues callbacks.
47  
48     For remote operations, send_xxxx() results in the corresponding do_xxxx()
49     function being executed on the remote node.  The connecting send/receive
50     calls on local (L) and remote (R) nodes:
51  
52     L: send_xxxx()              ->  R: receive_xxxx()
53                                     R: do_xxxx()
54     L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
55  */
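/* Worked instance of the four stages (editor's illustration, based on the
   flow described above): a plain lock request on a resource mastered by a
   remote node R takes the path

     L: dlm_lock()                 stage 1: check input args, no CONVERT flag
     L: request_lock()             stage 2: find_rsb() supplies the rsb
     L: _request_lock()            stage 3: rsb is remote -> send_request()
     R: receive_request()
     R: do_request()               stage 4: grant or queue on the master rsb
     R: send_request_reply()
     L: receive_request_reply()

   The same substitution (request -> convert/unlock/cancel) applies to the
   other three operations. */
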
56  #include <trace/events/dlm.h>
57  
58  #include <linux/types.h>
59  #include <linux/rbtree.h>
60  #include <linux/slab.h>
61  #include "dlm_internal.h"
62  #include <linux/dlm_device.h>
63  #include "memory.h"
64  #include "midcomms.h"
65  #include "requestqueue.h"
66  #include "util.h"
67  #include "dir.h"
68  #include "member.h"
69  #include "lockspace.h"
70  #include "ast.h"
71  #include "lock.h"
72  #include "rcom.h"
73  #include "recover.h"
74  #include "lvb_table.h"
75  #include "user.h"
76  #include "config.h"
77  
78  static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
79  static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
80  static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
81  static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
82  static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
83  static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
84  static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
85  static int send_remove(struct dlm_rsb *r);
86  static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
87  static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
88  static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
89  				    const struct dlm_message *ms, bool local);
90  static int receive_extralen(const struct dlm_message *ms);
91  static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
92  static void toss_rsb(struct kref *kref);
93  
94  /*
95   * Lock compatibility matrix - thanks Steve
96   * UN = Unlocked state. Not really a state, used as a flag
97   * PD = Padding. Used to make the matrix a nice power of two in size
98   * Other states are the same as the VMS DLM.
99   * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
100   */
101  
102  static const int __dlm_compat_matrix[8][8] = {
103        /* UN NL CR CW PR PW EX PD */
104          {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
105          {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
106          {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
107          {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
108          {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
109          {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
110          {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
111          {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
112  };
113  
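/* Illustrative lookups (editor's sketch, assuming the DLM_LOCK_* values
 * from <linux/dlm.h>: IV=-1, NL=0, CR=1, CW=2, PR=3, PW=4, EX=5):
 *
 *   __dlm_compat_matrix[DLM_LOCK_CR + 1][DLM_LOCK_PR + 1] == 1
 *       a PR request is compatible with an existing CR lock
 *   __dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_CW + 1] == 0
 *       a CW request conflicts with an existing PR lock and must wait
 *   __dlm_compat_matrix[DLM_LOCK_IV + 1][DLM_LOCK_EX + 1] == 1
 *       row 0 is the UN ("unlocked") row, so any request is compatible
 */
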
114  /*
115   * This defines the direction of transfer of LVB data.
116   * Granted mode is the row; requested mode is the column.
117   * Usage: matrix[grmode+1][rqmode+1]
118   * 1 = LVB is returned to the caller
119   * 0 = LVB is written to the resource
120   * -1 = nothing happens to the LVB
121   */
122  
123  const int dlm_lvb_operations[8][8] = {
124          /* UN   NL  CR  CW  PR  PW  EX  PD*/
125          {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
126          {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
127          {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
128          {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
129          {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
130          {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
131          {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
132          {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
133  };
134  
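/* Editor's example of reading the LVB table above (same DLM_LOCK_* indexing
 * assumption as the previous note):
 *
 *   dlm_lvb_operations[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1
 *       converting up from NL to EX returns the resource's LVB to the caller
 *   dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0
 *       converting down from EX to NL writes the caller's LVB to the resource
 *   dlm_lvb_operations[DLM_LOCK_CR + 1][DLM_LOCK_NL + 1] == -1
 *       a CR -> NL down-conversion leaves the LVB untouched
 */
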
135  #define modes_compat(gr, rq) \
136  	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
137  
138  int dlm_modes_compat(int mode1, int mode2)
139  {
140  	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
141  }
142  
143  /*
144   * Compatibility matrix for conversions with QUECVT set.
145   * Granted mode is the row; requested mode is the column.
146   * Usage: matrix[grmode+1][rqmode+1]
147   */
148  
149  static const int __quecvt_compat_matrix[8][8] = {
150        /* UN NL CR CW PR PW EX PD */
151          {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
152          {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
153          {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
154          {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
155          {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
156          {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
157          {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
158          {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
159  };
160  
161  void dlm_print_lkb(struct dlm_lkb *lkb)
162  {
163  	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
164  	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
165  	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
166  	       dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode,
167  	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
168  	       (unsigned long long)lkb->lkb_recover_seq);
169  }
170  
171  static void dlm_print_rsb(struct dlm_rsb *r)
172  {
173  	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
174  	       "rlc %d name %s\n",
175  	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
176  	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
177  	       r->res_name);
178  }
179  
180  void dlm_dump_rsb(struct dlm_rsb *r)
181  {
182  	struct dlm_lkb *lkb;
183  
184  	dlm_print_rsb(r);
185  
186  	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
187  	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
188  	printk(KERN_ERR "rsb lookup list\n");
189  	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
190  		dlm_print_lkb(lkb);
191  	printk(KERN_ERR "rsb grant queue:\n");
192  	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
193  		dlm_print_lkb(lkb);
194  	printk(KERN_ERR "rsb convert queue:\n");
195  	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
196  		dlm_print_lkb(lkb);
197  	printk(KERN_ERR "rsb wait queue:\n");
198  	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
199  		dlm_print_lkb(lkb);
200  }
201  
202  /* Threads cannot use the lockspace while it's being recovered */
203  
204  static inline void dlm_lock_recovery(struct dlm_ls *ls)
205  {
206  	down_read(&ls->ls_in_recovery);
207  }
208  
209  void dlm_unlock_recovery(struct dlm_ls *ls)
210  {
211  	up_read(&ls->ls_in_recovery);
212  }
213  
214  int dlm_lock_recovery_try(struct dlm_ls *ls)
215  {
216  	return down_read_trylock(&ls->ls_in_recovery);
217  }
218  
219  static inline int can_be_queued(struct dlm_lkb *lkb)
220  {
221  	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
222  }
223  
224  static inline int force_blocking_asts(struct dlm_lkb *lkb)
225  {
226  	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
227  }
228  
229  static inline int is_demoted(struct dlm_lkb *lkb)
230  {
231  	return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags);
232  }
233  
234  static inline int is_altmode(struct dlm_lkb *lkb)
235  {
236  	return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags);
237  }
238  
239  static inline int is_granted(struct dlm_lkb *lkb)
240  {
241  	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
242  }
243  
244  static inline int is_remote(struct dlm_rsb *r)
245  {
246  	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
247  	return !!r->res_nodeid;
248  }
249  
250  static inline int is_process_copy(struct dlm_lkb *lkb)
251  {
252  	return lkb->lkb_nodeid &&
253  	       !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
254  }
255  
256  static inline int is_master_copy(struct dlm_lkb *lkb)
257  {
258  	return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
259  }
260  
261  static inline int middle_conversion(struct dlm_lkb *lkb)
262  {
263  	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
264  	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
265  		return 1;
266  	return 0;
267  }
268  
269  static inline int down_conversion(struct dlm_lkb *lkb)
270  {
271  	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
272  }
273  
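/* Editor's examples for the conversion helpers above (assuming the numeric
 * DLM_LOCK_* ordering NL=0 < CR < CW < PR < PW < EX=5):
 *
 *   grmode PR, rqmode CW : middle_conversion() is true; PR and CW do not
 *                          order against each other
 *   grmode EX, rqmode NL : down_conversion() is true (rqmode < grmode and
 *                          not a PR<->CW case)
 *   grmode CR, rqmode EX : both helpers return 0; this is an ordinary
 *                          up-conversion
 */
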
274  static inline int is_overlap_unlock(struct dlm_lkb *lkb)
275  {
276  	return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
277  }
278  
279  static inline int is_overlap_cancel(struct dlm_lkb *lkb)
280  {
281  	return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
282  }
283  
284  static inline int is_overlap(struct dlm_lkb *lkb)
285  {
286  	return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) ||
287  	       test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
288  }
289  
290  static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
291  {
292  	if (is_master_copy(lkb))
293  		return;
294  
295  	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
296  
297  	if (rv == -DLM_ECANCEL &&
298  	    test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags))
299  		rv = -EDEADLK;
300  
301  	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb));
302  }
303  
304  static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
305  {
306  	queue_cast(r, lkb,
307  		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
308  }
309  
310  static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
311  {
312  	if (is_master_copy(lkb)) {
313  		send_bast(r, lkb, rqmode);
314  	} else {
315  		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
316  	}
317  }
318  
319  /*
320   * Basic operations on rsb's and lkb's
321   */
322  
323  /* This is only called to add a reference when the code already holds
324     a valid reference to the rsb, so there's no need for locking. */
325  
326  static inline void hold_rsb(struct dlm_rsb *r)
327  {
328  	kref_get(&r->res_ref);
329  }
330  
331  void dlm_hold_rsb(struct dlm_rsb *r)
332  {
333  	hold_rsb(r);
334  }
335  
336  /* When all references to the rsb are gone it's transferred to
337     the tossed list for later disposal. */
338  
339  static void put_rsb(struct dlm_rsb *r)
340  {
341  	struct dlm_ls *ls = r->res_ls;
342  	uint32_t bucket = r->res_bucket;
343  	int rv;
344  
345  	rv = kref_put_lock(&r->res_ref, toss_rsb,
346  			   &ls->ls_rsbtbl[bucket].lock);
347  	if (rv)
348  		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
349  }
350  
351  void dlm_put_rsb(struct dlm_rsb *r)
352  {
353  	put_rsb(r);
354  }
355  
356  static int pre_rsb_struct(struct dlm_ls *ls)
357  {
358  	struct dlm_rsb *r1, *r2;
359  	int count = 0;
360  
361  	spin_lock(&ls->ls_new_rsb_spin);
362  	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
363  		spin_unlock(&ls->ls_new_rsb_spin);
364  		return 0;
365  	}
366  	spin_unlock(&ls->ls_new_rsb_spin);
367  
368  	r1 = dlm_allocate_rsb(ls);
369  	r2 = dlm_allocate_rsb(ls);
370  
371  	spin_lock(&ls->ls_new_rsb_spin);
372  	if (r1) {
373  		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
374  		ls->ls_new_rsb_count++;
375  	}
376  	if (r2) {
377  		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
378  		ls->ls_new_rsb_count++;
379  	}
380  	count = ls->ls_new_rsb_count;
381  	spin_unlock(&ls->ls_new_rsb_spin);
382  
383  	if (!count)
384  		return -ENOMEM;
385  	return 0;
386  }
387  
388  /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
389     unlock any spinlocks, go back and call pre_rsb_struct again.
390     Otherwise, take an rsb off the list and return it. */
391  
392  static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
393  			  struct dlm_rsb **r_ret)
394  {
395  	struct dlm_rsb *r;
396  	int count;
397  
398  	spin_lock(&ls->ls_new_rsb_spin);
399  	if (list_empty(&ls->ls_new_rsb)) {
400  		count = ls->ls_new_rsb_count;
401  		spin_unlock(&ls->ls_new_rsb_spin);
402  		log_debug(ls, "find_rsb retry %d %d %s",
403  			  count, dlm_config.ci_new_rsb_count,
404  			  (const char *)name);
405  		return -EAGAIN;
406  	}
407  
408  	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
409  	list_del(&r->res_hashchain);
410  	/* Convert the empty list_head to a NULL rb_node for tree usage: */
411  	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
412  	ls->ls_new_rsb_count--;
413  	spin_unlock(&ls->ls_new_rsb_spin);
414  
415  	r->res_ls = ls;
416  	r->res_length = len;
417  	memcpy(r->res_name, name, len);
418  	mutex_init(&r->res_mutex);
419  
420  	INIT_LIST_HEAD(&r->res_lookup);
421  	INIT_LIST_HEAD(&r->res_grantqueue);
422  	INIT_LIST_HEAD(&r->res_convertqueue);
423  	INIT_LIST_HEAD(&r->res_waitqueue);
424  	INIT_LIST_HEAD(&r->res_root_list);
425  	INIT_LIST_HEAD(&r->res_recover_list);
426  
427  	*r_ret = r;
428  	return 0;
429  }
430  
431  static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
432  {
433  	char maxname[DLM_RESNAME_MAXLEN];
434  
435  	memset(maxname, 0, DLM_RESNAME_MAXLEN);
436  	memcpy(maxname, name, nlen);
437  	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
438  }
439  
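/* Editor's note on rsb_cmp(): the lookup name is zero-padded to
 * DLM_RESNAME_MAXLEN before the memcmp, so names of different lengths are
 * always compared over the same fixed width.  For example (hypothetical
 * resource names), with an rsb named "foo" a lookup of "foobar" compares
 * "foo\0...\0" against "foobar\0...\0", which keeps the rbtree ordering
 * total and consistent regardless of name length.
 */
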
440  int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
441  			struct dlm_rsb **r_ret)
442  {
443  	struct rb_node *node = tree->rb_node;
444  	struct dlm_rsb *r;
445  	int rc;
446  
447  	while (node) {
448  		r = rb_entry(node, struct dlm_rsb, res_hashnode);
449  		rc = rsb_cmp(r, name, len);
450  		if (rc < 0)
451  			node = node->rb_left;
452  		else if (rc > 0)
453  			node = node->rb_right;
454  		else
455  			goto found;
456  	}
457  	*r_ret = NULL;
458  	return -EBADR;
459  
460   found:
461  	*r_ret = r;
462  	return 0;
463  }
464  
465  static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
466  {
467  	struct rb_node **newn = &tree->rb_node;
468  	struct rb_node *parent = NULL;
469  	int rc;
470  
471  	while (*newn) {
472  		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
473  					       res_hashnode);
474  
475  		parent = *newn;
476  		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
477  		if (rc < 0)
478  			newn = &parent->rb_left;
479  		else if (rc > 0)
480  			newn = &parent->rb_right;
481  		else {
482  			log_print("rsb_insert match");
483  			dlm_dump_rsb(rsb);
484  			dlm_dump_rsb(cur);
485  			return -EEXIST;
486  		}
487  	}
488  
489  	rb_link_node(&rsb->res_hashnode, parent, newn);
490  	rb_insert_color(&rsb->res_hashnode, tree);
491  	return 0;
492  }
493  
494  /*
495   * Find rsb in rsbtbl and potentially create/add one
496   *
497   * Delaying the release of rsb's has a similar benefit to applications keeping
498   * NL locks on an rsb, but without the guarantee that the cached master value
499   * will still be valid when the rsb is reused.  Apps aren't always smart enough
500   * to keep NL locks on an rsb that they may lock again shortly; this can lead
501   * to excessive master lookups and removals if we don't delay the release.
502   *
503   * Searching for an rsb means looking through both the normal list and toss
504   * list.  When found on the toss list the rsb is moved to the normal list with
505   * ref count of 1; when found on normal list the ref count is incremented.
506   *
507   * rsb's on the keep list are being used locally and refcounted.
508   * rsb's on the toss list are not being used locally, and are not refcounted.
509   *
510   * The toss list rsb's were either
511   * - previously used locally but not any more (were on keep list, then
512   *   moved to toss list when last refcount dropped)
513   * - created and put on toss list as a directory record for a lookup
514   *   (we are the dir node for the res, but are not using the res right now,
515   *   but some other node is)
516   *
517   * The purpose of find_rsb() is to return a refcounted rsb for local use.
518   * So, if the given rsb is on the toss list, it is moved to the keep list
519   * before being returned.
520   *
521   * toss_rsb() happens when all local usage of the rsb is done, i.e. no
522   * more refcounts exist, so the rsb is moved from the keep list to the
523   * toss list.
524   *
525   * rsb's on both keep and toss lists are used for doing a name to master
526   * lookups.  rsb's that are in use locally (and being refcounted) are on
527   * the keep list, rsb's that are not in use locally (not refcounted) and
528   * only exist for name/master lookups are on the toss list.
529   *
530   * rsb's on the toss list whose dir_nodeid is not local can have stale
531   * name/master mappings.  So, remote requests on such rsb's can potentially
532   * return with an error, which means the mapping is stale and needs to
533   * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
534   * first_lkid is to keep only a single outstanding request on an rsb
535   * while that rsb has a potentially stale master.)
536   */
537  
538  static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
539  			uint32_t hash, uint32_t b,
540  			int dir_nodeid, int from_nodeid,
541  			unsigned int flags, struct dlm_rsb **r_ret)
542  {
543  	struct dlm_rsb *r = NULL;
544  	int our_nodeid = dlm_our_nodeid();
545  	int from_local = 0;
546  	int from_other = 0;
547  	int from_dir = 0;
548  	int create = 0;
549  	int error;
550  
551  	if (flags & R_RECEIVE_REQUEST) {
552  		if (from_nodeid == dir_nodeid)
553  			from_dir = 1;
554  		else
555  			from_other = 1;
556  	} else if (flags & R_REQUEST) {
557  		from_local = 1;
558  	}
559  
560  	/*
561  	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
562  	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
563  	 * we're the new master.  Our local recovery may not have set
564  	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
565  	 * create the rsb; dlm_recover_process_copy() will handle EBADR
566  	 * by resending.
567  	 *
568  	 * If someone sends us a request, we are the dir node, and we do
569  	 * not find the rsb anywhere, then recreate it.  This happens if
570  	 * someone sends us a request after we have removed/freed an rsb
571  	 * from our toss list.  (They sent a request instead of lookup
572  	 * because they are using an rsb from their toss list.)
573  	 */
574  
575  	if (from_local || from_dir ||
576  	    (from_other && (dir_nodeid == our_nodeid))) {
577  		create = 1;
578  	}
579  
580   retry:
581  	if (create) {
582  		error = pre_rsb_struct(ls);
583  		if (error < 0)
584  			goto out;
585  	}
586  
587  	spin_lock(&ls->ls_rsbtbl[b].lock);
588  
589  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
590  	if (error)
591  		goto do_toss;
592  
593  	/*
594  	 * rsb is active, so we can't check master_nodeid without lock_rsb.
595  	 */
596  
597  	kref_get(&r->res_ref);
598  	goto out_unlock;
599  
600  
601   do_toss:
602  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
603  	if (error)
604  		goto do_new;
605  
606  	/*
607  	 * rsb found inactive (master_nodeid may be out of date unless
608  	 * we are the dir_nodeid or were the master).  No other thread
609  	 * is using this rsb because it's on the toss list, so we can
610  	 * look at or update res_master_nodeid without lock_rsb.
611  	 */
612  
613  	if ((r->res_master_nodeid != our_nodeid) && from_other) {
614  		/* our rsb was not master, and another node (not the dir node)
615  		   has sent us a request */
616  		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
617  			  from_nodeid, r->res_master_nodeid, dir_nodeid,
618  			  r->res_name);
619  		error = -ENOTBLK;
620  		goto out_unlock;
621  	}
622  
623  	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
624  		/* don't think this should ever happen */
625  		log_error(ls, "find_rsb toss from_dir %d master %d",
626  			  from_nodeid, r->res_master_nodeid);
627  		dlm_print_rsb(r);
628  		/* fix it and go on */
629  		r->res_master_nodeid = our_nodeid;
630  		r->res_nodeid = 0;
631  		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
632  		r->res_first_lkid = 0;
633  	}
634  
635  	if (from_local && (r->res_master_nodeid != our_nodeid)) {
636  		/* Because we have held no locks on this rsb,
637  		   res_master_nodeid could have become stale. */
638  		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
639  		r->res_first_lkid = 0;
640  	}
641  
642  	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
643  	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
644  	goto out_unlock;
645  
646  
647   do_new:
648  	/*
649  	 * rsb not found
650  	 */
651  
652  	if (error == -EBADR && !create)
653  		goto out_unlock;
654  
655  	error = get_rsb_struct(ls, name, len, &r);
656  	if (error == -EAGAIN) {
657  		spin_unlock(&ls->ls_rsbtbl[b].lock);
658  		goto retry;
659  	}
660  	if (error)
661  		goto out_unlock;
662  
663  	r->res_hash = hash;
664  	r->res_bucket = b;
665  	r->res_dir_nodeid = dir_nodeid;
666  	kref_init(&r->res_ref);
667  
668  	if (from_dir) {
669  		/* want to see how often this happens */
670  		log_debug(ls, "find_rsb new from_dir %d recreate %s",
671  			  from_nodeid, r->res_name);
672  		r->res_master_nodeid = our_nodeid;
673  		r->res_nodeid = 0;
674  		goto out_add;
675  	}
676  
677  	if (from_other && (dir_nodeid != our_nodeid)) {
678  		/* should never happen */
679  		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
680  			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
681  		dlm_free_rsb(r);
682  		r = NULL;
683  		error = -ENOTBLK;
684  		goto out_unlock;
685  	}
686  
687  	if (from_other) {
688  		log_debug(ls, "find_rsb new from_other %d dir %d %s",
689  			  from_nodeid, dir_nodeid, r->res_name);
690  	}
691  
692  	if (dir_nodeid == our_nodeid) {
693  		/* When we are the dir nodeid, we can set the master
694  		   node immediately */
695  		r->res_master_nodeid = our_nodeid;
696  		r->res_nodeid = 0;
697  	} else {
698  		/* set_master will send_lookup to dir_nodeid */
699  		r->res_master_nodeid = 0;
700  		r->res_nodeid = -1;
701  	}
702  
703   out_add:
704  	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
705   out_unlock:
706  	spin_unlock(&ls->ls_rsbtbl[b].lock);
707   out:
708  	*r_ret = r;
709  	return error;
710  }
711  
712  /* During recovery, other nodes can send us new MSTCPY locks (from
713     dlm_recover_locks) before we've made ourselves master (in
714     dlm_recover_masters). */
715  
716  static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
717  			  uint32_t hash, uint32_t b,
718  			  int dir_nodeid, int from_nodeid,
719  			  unsigned int flags, struct dlm_rsb **r_ret)
720  {
721  	struct dlm_rsb *r = NULL;
722  	int our_nodeid = dlm_our_nodeid();
723  	int recover = (flags & R_RECEIVE_RECOVER);
724  	int error;
725  
726   retry:
727  	error = pre_rsb_struct(ls);
728  	if (error < 0)
729  		goto out;
730  
731  	spin_lock(&ls->ls_rsbtbl[b].lock);
732  
733  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
734  	if (error)
735  		goto do_toss;
736  
737  	/*
738  	 * rsb is active, so we can't check master_nodeid without lock_rsb.
739  	 */
740  
741  	kref_get(&r->res_ref);
742  	goto out_unlock;
743  
744  
745   do_toss:
746  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
747  	if (error)
748  		goto do_new;
749  
750  	/*
751  	 * rsb found inactive. No other thread is using this rsb because
752  	 * it's on the toss list, so we can look at or update
753  	 * res_master_nodeid without lock_rsb.
754  	 */
755  
756  	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
757  		/* our rsb is not master, and another node has sent us a
758  		   request; this should never happen */
759  		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
760  			  from_nodeid, r->res_master_nodeid, dir_nodeid);
761  		dlm_print_rsb(r);
762  		error = -ENOTBLK;
763  		goto out_unlock;
764  	}
765  
766  	if (!recover && (r->res_master_nodeid != our_nodeid) &&
767  	    (dir_nodeid == our_nodeid)) {
768  		/* our rsb is not master, and we are dir; may as well fix it;
769  		   this should never happen */
770  		log_error(ls, "find_rsb toss our %d master %d dir %d",
771  			  our_nodeid, r->res_master_nodeid, dir_nodeid);
772  		dlm_print_rsb(r);
773  		r->res_master_nodeid = our_nodeid;
774  		r->res_nodeid = 0;
775  	}
776  
777  	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
778  	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
779  	goto out_unlock;
780  
781  
782   do_new:
783  	/*
784  	 * rsb not found
785  	 */
786  
787  	error = get_rsb_struct(ls, name, len, &r);
788  	if (error == -EAGAIN) {
789  		spin_unlock(&ls->ls_rsbtbl[b].lock);
790  		goto retry;
791  	}
792  	if (error)
793  		goto out_unlock;
794  
795  	r->res_hash = hash;
796  	r->res_bucket = b;
797  	r->res_dir_nodeid = dir_nodeid;
798  	r->res_master_nodeid = dir_nodeid;
799  	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
800  	kref_init(&r->res_ref);
801  
802  	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
803   out_unlock:
804  	spin_unlock(&ls->ls_rsbtbl[b].lock);
805   out:
806  	*r_ret = r;
807  	return error;
808  }
809  
810  static int find_rsb(struct dlm_ls *ls, const void *name, int len,
811  		    int from_nodeid, unsigned int flags,
812  		    struct dlm_rsb **r_ret)
813  {
814  	uint32_t hash, b;
815  	int dir_nodeid;
816  
817  	if (len > DLM_RESNAME_MAXLEN)
818  		return -EINVAL;
819  
820  	hash = jhash(name, len, 0);
821  	b = hash & (ls->ls_rsbtbl_size - 1);
822  
823  	dir_nodeid = dlm_hash2nodeid(ls, hash);
824  
825  	if (dlm_no_directory(ls))
826  		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
827  				      from_nodeid, flags, r_ret);
828  	else
829  		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
830  				      from_nodeid, flags, r_ret);
831  }
832  
833  /* we have received a request and found that res_master_nodeid != our_nodeid,
834     so we need to return an error or make ourselves the master */
835  
836  static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
837  				  int from_nodeid)
838  {
839  	if (dlm_no_directory(ls)) {
840  		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
841  			  from_nodeid, r->res_master_nodeid,
842  			  r->res_dir_nodeid);
843  		dlm_print_rsb(r);
844  		return -ENOTBLK;
845  	}
846  
847  	if (from_nodeid != r->res_dir_nodeid) {
848  		/* our rsb is not master, and another node (not the dir node)
849  	   	   has sent us a request.  this is much more common when our
850  	   	   master_nodeid is zero, so limit debug to non-zero.  */
851  
852  		if (r->res_master_nodeid) {
853  			log_debug(ls, "validate master from_other %d master %d "
854  				  "dir %d first %x %s", from_nodeid,
855  				  r->res_master_nodeid, r->res_dir_nodeid,
856  				  r->res_first_lkid, r->res_name);
857  		}
858  		return -ENOTBLK;
859  	} else {
860  		/* our rsb is not master, but the dir nodeid has sent us a
861  	   	   request; this could happen with master 0 / res_nodeid -1 */
862  
863  		if (r->res_master_nodeid) {
864  			log_error(ls, "validate master from_dir %d master %d "
865  				  "first %x %s",
866  				  from_nodeid, r->res_master_nodeid,
867  				  r->res_first_lkid, r->res_name);
868  		}
869  
870  		r->res_master_nodeid = dlm_our_nodeid();
871  		r->res_nodeid = 0;
872  		return 0;
873  	}
874  }
875  
876  static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
877  				int from_nodeid, bool toss_list, unsigned int flags,
878  				int *r_nodeid, int *result)
879  {
880  	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
881  	int from_master = (flags & DLM_LU_RECOVER_DIR);
882  
883  	if (r->res_dir_nodeid != our_nodeid) {
884  		/* should not happen, but may as well fix it and carry on */
885  		log_error(ls, "%s res_dir %d our %d %s", __func__,
886  			  r->res_dir_nodeid, our_nodeid, r->res_name);
887  		r->res_dir_nodeid = our_nodeid;
888  	}
889  
890  	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
891  		/* Recovery uses this function to set a new master when
892  		 * the previous master failed.  Setting NEW_MASTER will
893  		 * force dlm_recover_masters to call recover_master on this
894  		 * rsb even though the res_nodeid is no longer removed.
895  		 */
896  
897  		r->res_master_nodeid = from_nodeid;
898  		r->res_nodeid = from_nodeid;
899  		rsb_set_flag(r, RSB_NEW_MASTER);
900  
901  		if (toss_list) {
902  			/* I don't think we should ever find it on toss list. */
903  			log_error(ls, "%s fix_master on toss", __func__);
904  			dlm_dump_rsb(r);
905  		}
906  	}
907  
908  	if (from_master && (r->res_master_nodeid != from_nodeid)) {
909  		/* this will happen if from_nodeid became master during
910  		 * a previous recovery cycle, and we aborted the previous
911  		 * cycle before recovering this master value
912  		 */
913  
914  		log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s",
915  			  __func__, from_nodeid, r->res_master_nodeid,
916  			  r->res_nodeid, r->res_first_lkid, r->res_name);
917  
918  		if (r->res_master_nodeid == our_nodeid) {
919  			log_error(ls, "from_master %d our_master", from_nodeid);
920  			dlm_dump_rsb(r);
921  			goto ret_assign;
922  		}
923  
924  		r->res_master_nodeid = from_nodeid;
925  		r->res_nodeid = from_nodeid;
926  		rsb_set_flag(r, RSB_NEW_MASTER);
927  	}
928  
929  	if (!r->res_master_nodeid) {
930  		/* this will happen if recovery happens while we're looking
931  		 * up the master for this rsb
932  		 */
933  
934  		log_debug(ls, "%s master 0 to %d first %x %s", __func__,
935  			  from_nodeid, r->res_first_lkid, r->res_name);
936  		r->res_master_nodeid = from_nodeid;
937  		r->res_nodeid = from_nodeid;
938  	}
939  
940  	if (!from_master && !fix_master &&
941  	    (r->res_master_nodeid == from_nodeid)) {
942  		/* this can happen when the master sends remove, the dir node
943  		 * finds the rsb on the keep list and ignores the remove,
944  		 * and the former master sends a lookup
945  		 */
946  
947  		log_limit(ls, "%s from master %d flags %x first %x %s",
948  			  __func__, from_nodeid, flags, r->res_first_lkid,
949  			  r->res_name);
950  	}
951  
952   ret_assign:
953  	*r_nodeid = r->res_master_nodeid;
954  	if (result)
955  		*result = DLM_LU_MATCH;
956  }
957  
958  /*
959   * We're the dir node for this res and another node wants to know the
960   * master nodeid.  During normal operation (non recovery) this is only
961   * called from receive_lookup(); master lookups when the local node is
962   * the dir node are done by find_rsb().
963   *
964   * normal operation, we are the dir node for a resource
965   * . _request_lock
966   * . set_master
967   * . send_lookup
968   * . receive_lookup
969   * . dlm_master_lookup flags 0
970   *
971   * recover directory, we are rebuilding dir for all resources
972   * . dlm_recover_directory
973   * . dlm_rcom_names
974   *   remote node sends back the rsb names it is master of and we are dir of
975   * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
976   *   we either create new rsb setting remote node as master, or find existing
977   *   rsb and set master to be the remote node.
978   *
979   * recover masters, we are finding the new master for resources
980   * . dlm_recover_masters
981   * . recover_master
982   * . dlm_send_rcom_lookup
983   * . receive_rcom_lookup
984   * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
985   */
986  
987  int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
988  		      int len, unsigned int flags, int *r_nodeid, int *result)
989  {
990  	struct dlm_rsb *r = NULL;
991  	uint32_t hash, b;
992  	int our_nodeid = dlm_our_nodeid();
993  	int dir_nodeid, error;
994  
995  	if (len > DLM_RESNAME_MAXLEN)
996  		return -EINVAL;
997  
998  	if (from_nodeid == our_nodeid) {
999  		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
1000  			  our_nodeid, flags);
1001  		return -EINVAL;
1002  	}
1003  
1004  	hash = jhash(name, len, 0);
1005  	b = hash & (ls->ls_rsbtbl_size - 1);
1006  
1007  	dir_nodeid = dlm_hash2nodeid(ls, hash);
1008  	if (dir_nodeid != our_nodeid) {
1009  		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
1010  			  from_nodeid, dir_nodeid, our_nodeid, hash,
1011  			  ls->ls_num_nodes);
1012  		*r_nodeid = -1;
1013  		return -EINVAL;
1014  	}
1015  
1016   retry:
1017  	error = pre_rsb_struct(ls);
1018  	if (error < 0)
1019  		return error;
1020  
1021  	spin_lock(&ls->ls_rsbtbl[b].lock);
1022  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1023  	if (!error) {
1024  		/* because the rsb is active, we need to lock_rsb before
1025  		 * checking/changing res_master_nodeid
1026  		 */
1027  
1028  		hold_rsb(r);
1029  		spin_unlock(&ls->ls_rsbtbl[b].lock);
1030  		lock_rsb(r);
1031  
1032  		__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
1033  				    flags, r_nodeid, result);
1034  
1035  		/* the rsb was active */
1036  		unlock_rsb(r);
1037  		put_rsb(r);
1038  
1039  		return 0;
1040  	}
1041  
1042  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1043  	if (error)
1044  		goto not_found;
1045  
1046  	/* because the rsb is inactive (on toss list), it's not refcounted
1047  	 * and lock_rsb is not used, but is protected by the rsbtbl lock
1048  	 */
1049  
1050  	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
1051  			    r_nodeid, result);
1052  
1053  	r->res_toss_time = jiffies;
1054  	/* the rsb was inactive (on toss list) */
1055  	spin_unlock(&ls->ls_rsbtbl[b].lock);
1056  
1057  	return 0;
1058  
1059   not_found:
1060  	error = get_rsb_struct(ls, name, len, &r);
1061  	if (error == -EAGAIN) {
1062  		spin_unlock(&ls->ls_rsbtbl[b].lock);
1063  		goto retry;
1064  	}
1065  	if (error)
1066  		goto out_unlock;
1067  
1068  	r->res_hash = hash;
1069  	r->res_bucket = b;
1070  	r->res_dir_nodeid = our_nodeid;
1071  	r->res_master_nodeid = from_nodeid;
1072  	r->res_nodeid = from_nodeid;
1073  	kref_init(&r->res_ref);
1074  	r->res_toss_time = jiffies;
1075  
1076  	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1077  	if (error) {
1078  		/* should never happen */
1079  		dlm_free_rsb(r);
1080  		spin_unlock(&ls->ls_rsbtbl[b].lock);
1081  		goto retry;
1082  	}
1083  
1084  	if (result)
1085  		*result = DLM_LU_ADD;
1086  	*r_nodeid = from_nodeid;
1087   out_unlock:
1088  	spin_unlock(&ls->ls_rsbtbl[b].lock);
1089  	return error;
1090  }
1091  
1092  static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1093  {
1094  	struct rb_node *n;
1095  	struct dlm_rsb *r;
1096  	int i;
1097  
1098  	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1099  		spin_lock(&ls->ls_rsbtbl[i].lock);
1100  		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1101  			r = rb_entry(n, struct dlm_rsb, res_hashnode);
1102  			if (r->res_hash == hash)
1103  				dlm_dump_rsb(r);
1104  		}
1105  		spin_unlock(&ls->ls_rsbtbl[i].lock);
1106  	}
1107  }
1108  
1109  void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
1110  {
1111  	struct dlm_rsb *r = NULL;
1112  	uint32_t hash, b;
1113  	int error;
1114  
1115  	hash = jhash(name, len, 0);
1116  	b = hash & (ls->ls_rsbtbl_size - 1);
1117  
1118  	spin_lock(&ls->ls_rsbtbl[b].lock);
1119  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1120  	if (!error)
1121  		goto out_dump;
1122  
1123  	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1124  	if (error)
1125  		goto out;
1126   out_dump:
1127  	dlm_dump_rsb(r);
1128   out:
1129  	spin_unlock(&ls->ls_rsbtbl[b].lock);
1130  }
1131  
1132  static void toss_rsb(struct kref *kref)
1133  {
1134  	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1135  	struct dlm_ls *ls = r->res_ls;
1136  
1137  	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
1138  	kref_init(&r->res_ref);
1139  	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1140  	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1141  	r->res_toss_time = jiffies;
1142  	set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags);
1143  	if (r->res_lvbptr) {
1144  		dlm_free_lvb(r->res_lvbptr);
1145  		r->res_lvbptr = NULL;
1146  	}
1147  }
1148  
1149  /* See comment for unhold_lkb */
1150  
1151  static void unhold_rsb(struct dlm_rsb *r)
1152  {
1153  	int rv;
1154  	rv = kref_put(&r->res_ref, toss_rsb);
1155  	DLM_ASSERT(!rv, dlm_dump_rsb(r););
1156  }
1157  
1158  static void kill_rsb(struct kref *kref)
1159  {
1160  	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1161  
1162  	/* All work is done after the return from kref_put() so we
1163  	   can release the write_lock before the remove and free. */
1164  
1165  	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1166  	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1167  	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1168  	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1169  	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1170  	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1171  }
1172  
1173  /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1174     The rsb must exist as long as any lkb's for it do. */
1175  
1176  static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1177  {
1178  	hold_rsb(r);
1179  	lkb->lkb_resource = r;
1180  }
1181  
1182  static void detach_lkb(struct dlm_lkb *lkb)
1183  {
1184  	if (lkb->lkb_resource) {
1185  		put_rsb(lkb->lkb_resource);
1186  		lkb->lkb_resource = NULL;
1187  	}
1188  }
1189  
1190  static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
1191  		       int start, int end)
1192  {
1193  	struct dlm_lkb *lkb;
1194  	int rv;
1195  
1196  	lkb = dlm_allocate_lkb(ls);
1197  	if (!lkb)
1198  		return -ENOMEM;
1199  
1200  	lkb->lkb_last_bast_mode = -1;
1201  	lkb->lkb_nodeid = -1;
1202  	lkb->lkb_grmode = DLM_LOCK_IV;
1203  	kref_init(&lkb->lkb_ref);
1204  	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1205  	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1206  	INIT_LIST_HEAD(&lkb->lkb_cb_list);
1207  	INIT_LIST_HEAD(&lkb->lkb_callbacks);
1208  	spin_lock_init(&lkb->lkb_cb_lock);
1209  	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1210  
1211  	idr_preload(GFP_NOFS);
1212  	spin_lock(&ls->ls_lkbidr_spin);
1213  	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
1214  	if (rv >= 0)
1215  		lkb->lkb_id = rv;
1216  	spin_unlock(&ls->ls_lkbidr_spin);
1217  	idr_preload_end();
1218  
1219  	if (rv < 0) {
1220  		log_error(ls, "create_lkb idr error %d", rv);
1221  		dlm_free_lkb(lkb);
1222  		return rv;
1223  	}
1224  
1225  	*lkb_ret = lkb;
1226  	return 0;
1227  }
1228  
1229  static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1230  {
1231  	return _create_lkb(ls, lkb_ret, 1, 0);
1232  }
1233  
1234  static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1235  {
1236  	struct dlm_lkb *lkb;
1237  
1238  	spin_lock(&ls->ls_lkbidr_spin);
1239  	lkb = idr_find(&ls->ls_lkbidr, lkid);
1240  	if (lkb)
1241  		kref_get(&lkb->lkb_ref);
1242  	spin_unlock(&ls->ls_lkbidr_spin);
1243  
1244  	*lkb_ret = lkb;
1245  	return lkb ? 0 : -ENOENT;
1246  }
1247  
1248  static void kill_lkb(struct kref *kref)
1249  {
1250  	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1251  
1252  	/* All work is done after the return from kref_put() so we
1253  	   can release the write_lock before the detach_lkb */
1254  
1255  	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1256  }
1257  
1258  /* __put_lkb() is used when an lkb may not have an rsb attached to
1259     it so we need to provide the lockspace explicitly */
1260  
1261  static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1262  {
1263  	uint32_t lkid = lkb->lkb_id;
1264  	int rv;
1265  
1266  	rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
1267  			   &ls->ls_lkbidr_spin);
1268  	if (rv) {
1269  		idr_remove(&ls->ls_lkbidr, lkid);
1270  		spin_unlock(&ls->ls_lkbidr_spin);
1271  
1272  		detach_lkb(lkb);
1273  
1274  		/* for local/process lkbs, lvbptr points to caller's lksb */
1275  		if (lkb->lkb_lvbptr && is_master_copy(lkb))
1276  			dlm_free_lvb(lkb->lkb_lvbptr);
1277  		dlm_free_lkb(lkb);
1278  	}
1279  
1280  	return rv;
1281  }
1282  
1283  int dlm_put_lkb(struct dlm_lkb *lkb)
1284  {
1285  	struct dlm_ls *ls;
1286  
1287  	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1288  	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1289  
1290  	ls = lkb->lkb_resource->res_ls;
1291  	return __put_lkb(ls, lkb);
1292  }
1293  
1294  /* This is only called to add a reference when the code already holds
1295     a valid reference to the lkb, so there's no need for locking. */
1296  
1297  static inline void hold_lkb(struct dlm_lkb *lkb)
1298  {
1299  	kref_get(&lkb->lkb_ref);
1300  }
1301  
1302  static void unhold_lkb_assert(struct kref *kref)
1303  {
1304  	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1305  
1306  	DLM_ASSERT(false, dlm_print_lkb(lkb););
1307  }
1308  
1309  /* This is called when we need to remove a reference and are certain
1310     it's not the last ref.  e.g. del_lkb is always called between a
1311     find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1312     put_lkb would work fine, but would involve unnecessary locking */
1313  
1314  static inline void unhold_lkb(struct dlm_lkb *lkb)
1315  {
1316  	kref_put(&lkb->lkb_ref, unhold_lkb_assert);
1317  }
1318  
1319  static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1320  			    int mode)
1321  {
1322  	struct dlm_lkb *lkb = NULL, *iter;
1323  
1324  	list_for_each_entry(iter, head, lkb_statequeue)
1325  		if (iter->lkb_rqmode < mode) {
1326  			lkb = iter;
1327  			list_add_tail(new, &iter->lkb_statequeue);
1328  			break;
1329  		}
1330  
1331  	if (!lkb)
1332  		list_add_tail(new, head);
1333  }
1334  
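/* Editor's example of the insertion rule above: the new entry is linked in
 * front of the first existing entry whose lkb_rqmode is below the given
 * mode, otherwise it goes to the tail.  E.g. for existing entries with
 * rqmode values [5, 4, 1] and mode 3, the new entry lands between the
 * rqmode-4 and rqmode-1 entries; equal values are skipped because the
 * comparison is strictly "<", so ties keep arrival order.
 */
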
1335  /* add/remove lkb to rsb's grant/convert/wait queue */
1336  
1337  static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1338  {
1339  	kref_get(&lkb->lkb_ref);
1340  
1341  	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1342  
1343  	lkb->lkb_timestamp = ktime_get();
1344  
1345  	lkb->lkb_status = status;
1346  
1347  	switch (status) {
1348  	case DLM_LKSTS_WAITING:
1349  		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1350  			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1351  		else
1352  			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1353  		break;
1354  	case DLM_LKSTS_GRANTED:
1355  		/* convention says granted locks kept in order of grmode */
1356  		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1357  				lkb->lkb_grmode);
1358  		break;
1359  	case DLM_LKSTS_CONVERT:
1360  		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1361  			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1362  		else
1363  			list_add_tail(&lkb->lkb_statequeue,
1364  				      &r->res_convertqueue);
1365  		break;
1366  	default:
1367  		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1368  	}
1369  }
1370  
1371  static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1372  {
1373  	lkb->lkb_status = 0;
1374  	list_del(&lkb->lkb_statequeue);
1375  	unhold_lkb(lkb);
1376  }
1377  
1378  static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1379  {
1380  	hold_lkb(lkb);
1381  	del_lkb(r, lkb);
1382  	add_lkb(r, lkb, sts);
1383  	unhold_lkb(lkb);
1384  }
1385  
1386  static int msg_reply_type(int mstype)
1387  {
1388  	switch (mstype) {
1389  	case DLM_MSG_REQUEST:
1390  		return DLM_MSG_REQUEST_REPLY;
1391  	case DLM_MSG_CONVERT:
1392  		return DLM_MSG_CONVERT_REPLY;
1393  	case DLM_MSG_UNLOCK:
1394  		return DLM_MSG_UNLOCK_REPLY;
1395  	case DLM_MSG_CANCEL:
1396  		return DLM_MSG_CANCEL_REPLY;
1397  	case DLM_MSG_LOOKUP:
1398  		return DLM_MSG_LOOKUP_REPLY;
1399  	}
1400  	return -1;
1401  }
1402  
1403  /* add/remove lkb from global waiters list of lkb's waiting for
1404     a reply from a remote node */
1405  
1406  static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1407  {
1408  	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1409  	int error = 0;
1410  	int wc;
1411  
1412  	mutex_lock(&ls->ls_waiters_mutex);
1413  
1414  	if (is_overlap_unlock(lkb) ||
1415  	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1416  		error = -EINVAL;
1417  		goto out;
1418  	}
1419  
1420  	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1421  		switch (mstype) {
1422  		case DLM_MSG_UNLOCK:
1423  			set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
1424  			break;
1425  		case DLM_MSG_CANCEL:
1426  			set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
1427  			break;
1428  		default:
1429  			error = -EBUSY;
1430  			goto out;
1431  		}
1432  		wc = atomic_inc_return(&lkb->lkb_wait_count);
1433  		hold_lkb(lkb);
1434  
1435  		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1436  			  lkb->lkb_id, lkb->lkb_wait_type, mstype, wc,
1437  			  dlm_iflags_val(lkb));
1438  		goto out;
1439  	}
1440  
1441  	wc = atomic_fetch_inc(&lkb->lkb_wait_count);
1442  	DLM_ASSERT(!wc, dlm_print_lkb(lkb); printk("wait_count %d\n", wc););
1443  	lkb->lkb_wait_type = mstype;
1444  	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1445  	hold_lkb(lkb);
1446  	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1447   out:
1448  	if (error)
1449  		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1450  			  lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
1451  			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1452  	mutex_unlock(&ls->ls_waiters_mutex);
1453  	return error;
1454  }
1455  
1456  /* We clear the RESEND flag because we might be taking an lkb off the waiters
1457     list as part of process_requestqueue (e.g. a lookup that has an optimized
1458     request reply on the requestqueue) between dlm_recover_waiters_pre() which
1459     set RESEND and dlm_recover_waiters_post() */
1460  
1461  static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1462  				const struct dlm_message *ms)
1463  {
1464  	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1465  	int overlap_done = 0;
1466  
1467  	if (mstype == DLM_MSG_UNLOCK_REPLY &&
1468  	    test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) {
1469  		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1470  		overlap_done = 1;
1471  		goto out_del;
1472  	}
1473  
1474  	if (mstype == DLM_MSG_CANCEL_REPLY &&
1475  	    test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) {
1476  		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1477  		overlap_done = 1;
1478  		goto out_del;
1479  	}
1480  
1481  	/* Cancel state was preemptively cleared by a successful convert,
1482  	   see next comment, nothing to do. */
1483  
1484  	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1485  	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1486  		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1487  			  lkb->lkb_id, lkb->lkb_wait_type);
1488  		return -1;
1489  	}
1490  
1491  	/* Remove for the convert reply, and preemptively remove for the
1492  	   cancel reply.  A convert has been granted while there's still
1493  	   an outstanding cancel on it (the cancel is moot and the result
1494  	   in the cancel reply should be 0).  We preempt the cancel reply
1495  	   because the app gets the convert result and then can follow up
1496  	   with another op, like convert.  This subsequent op would see the
1497  	   lingering state of the cancel and fail with -EBUSY. */
1498  
1499  	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1500  	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result &&
1501  	    test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) {
1502  		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1503  			  lkb->lkb_id);
1504  		lkb->lkb_wait_type = 0;
1505  		atomic_dec(&lkb->lkb_wait_count);
1506  		unhold_lkb(lkb);
1507  		goto out_del;
1508  	}
1509  
1510  	/* N.B. type of reply may not always correspond to type of original
1511  	   msg due to lookup->request optimization, verify others? */
1512  
1513  	if (lkb->lkb_wait_type) {
1514  		lkb->lkb_wait_type = 0;
1515  		goto out_del;
1516  	}
1517  
1518  	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1519  		  lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
1520  		  lkb->lkb_remid, mstype, dlm_iflags_val(lkb));
1521  	return -1;
1522  
1523   out_del:
1524  	/* the force-unlock/cancel has completed and we haven't recvd a reply
1525  	   to the op that was in progress prior to the unlock/cancel; we
1526  	   give up on any reply to the earlier op.  FIXME: not sure when/how
1527  	   this would happen */
1528  
1529  	if (overlap_done && lkb->lkb_wait_type) {
1530  		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1531  			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1532  		atomic_dec(&lkb->lkb_wait_count);
1533  		unhold_lkb(lkb);
1534  		lkb->lkb_wait_type = 0;
1535  	}
1536  
1537  	DLM_ASSERT(atomic_read(&lkb->lkb_wait_count), dlm_print_lkb(lkb););
1538  
1539  	clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
1540  	if (atomic_dec_and_test(&lkb->lkb_wait_count))
1541  		list_del_init(&lkb->lkb_wait_reply);
1542  	unhold_lkb(lkb);
1543  	return 0;
1544  }
1545  
1546  static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1547  {
1548  	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1549  	int error;
1550  
1551  	mutex_lock(&ls->ls_waiters_mutex);
1552  	error = _remove_from_waiters(lkb, mstype, NULL);
1553  	mutex_unlock(&ls->ls_waiters_mutex);
1554  	return error;
1555  }
1556  
1557  /* Handles situations where we might be processing a "fake" or "local" reply in
1558     which we can't try to take waiters_mutex again. */
1559  
1560  static int remove_from_waiters_ms(struct dlm_lkb *lkb,
1561  				  const struct dlm_message *ms, bool local)
1562  {
1563  	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1564  	int error;
1565  
1566  	if (!local)
1567  		mutex_lock(&ls->ls_waiters_mutex);
1568  	error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
1569  	if (!local)
1570  		mutex_unlock(&ls->ls_waiters_mutex);
1571  	return error;
1572  }
1573  
1574  static void shrink_bucket(struct dlm_ls *ls, int b)
1575  {
1576  	struct rb_node *n, *next;
1577  	struct dlm_rsb *r;
1578  	char *name;
1579  	int our_nodeid = dlm_our_nodeid();
1580  	int remote_count = 0;
1581  	int need_shrink = 0;
1582  	int i, len, rv;
1583  
1584  	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1585  
1586  	spin_lock(&ls->ls_rsbtbl[b].lock);
1587  
1588  	if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) {
1589  		spin_unlock(&ls->ls_rsbtbl[b].lock);
1590  		return;
1591  	}
1592  
1593  	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1594  		next = rb_next(n);
1595  		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1596  
1597  		/* If we're the directory record for this rsb, and
1598  		   we're not the master of it, then we need to wait
1599  		   for the master node to send us a dir remove for it
1600  		   before removing the dir record. */
1601  
1602  		if (!dlm_no_directory(ls) &&
1603  		    (r->res_master_nodeid != our_nodeid) &&
1604  		    (dlm_dir_nodeid(r) == our_nodeid)) {
1605  			continue;
1606  		}
1607  
1608  		need_shrink = 1;
1609  
1610  		if (!time_after_eq(jiffies, r->res_toss_time +
1611  				   dlm_config.ci_toss_secs * HZ)) {
1612  			continue;
1613  		}
1614  
1615  		if (!dlm_no_directory(ls) &&
1616  		    (r->res_master_nodeid == our_nodeid) &&
1617  		    (dlm_dir_nodeid(r) != our_nodeid)) {
1618  
1619  			/* We're the master of this rsb but we're not
1620  			   the directory record, so we need to tell the
1621  			   dir node to remove the dir record. */
1622  
1623  			ls->ls_remove_lens[remote_count] = r->res_length;
1624  			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1625  			       DLM_RESNAME_MAXLEN);
1626  			remote_count++;
1627  
1628  			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1629  				break;
1630  			continue;
1631  		}
1632  
1633  		if (!kref_put(&r->res_ref, kill_rsb)) {
1634  			log_error(ls, "tossed rsb in use %s", r->res_name);
1635  			continue;
1636  		}
1637  
1638  		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1639  		dlm_free_rsb(r);
1640  	}
1641  
1642  	if (need_shrink)
1643  		set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
1644  	else
1645  		clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
1646  	spin_unlock(&ls->ls_rsbtbl[b].lock);
1647  
1648  	/*
1649  	 * While searching for rsb's to free, we found some that require
1650  	 * remote removal.  We leave them in place and find them again here
1651  	 * so there is a very small gap between removing them from the toss
1652  	 * list and sending the removal.  Keeping this gap small is
1653  	 * important to keep us (the master node) from being out of sync
1654  	 * with the remote dir node for very long.
1655  	 */
1656  
1657  	for (i = 0; i < remote_count; i++) {
1658  		name = ls->ls_remove_names[i];
1659  		len = ls->ls_remove_lens[i];
1660  
1661  		spin_lock(&ls->ls_rsbtbl[b].lock);
1662  		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1663  		if (rv) {
1664  			spin_unlock(&ls->ls_rsbtbl[b].lock);
1665  			log_debug(ls, "remove_name not toss %s", name);
1666  			continue;
1667  		}
1668  
1669  		if (r->res_master_nodeid != our_nodeid) {
1670  			spin_unlock(&ls->ls_rsbtbl[b].lock);
1671  			log_debug(ls, "remove_name master %d dir %d our %d %s",
1672  				  r->res_master_nodeid, r->res_dir_nodeid,
1673  				  our_nodeid, name);
1674  			continue;
1675  		}
1676  
1677  		if (r->res_dir_nodeid == our_nodeid) {
1678  			/* should never happen */
1679  			spin_unlock(&ls->ls_rsbtbl[b].lock);
1680  			log_error(ls, "remove_name dir %d master %d our %d %s",
1681  				  r->res_dir_nodeid, r->res_master_nodeid,
1682  				  our_nodeid, name);
1683  			continue;
1684  		}
1685  
1686  		if (!time_after_eq(jiffies, r->res_toss_time +
1687  				   dlm_config.ci_toss_secs * HZ)) {
1688  			spin_unlock(&ls->ls_rsbtbl[b].lock);
1689  			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1690  				  r->res_toss_time, jiffies, name);
1691  			continue;
1692  		}
1693  
1694  		if (!kref_put(&r->res_ref, kill_rsb)) {
1695  			spin_unlock(&ls->ls_rsbtbl[b].lock);
1696  			log_error(ls, "remove_name in use %s", name);
1697  			continue;
1698  		}
1699  
1700  		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1701  		send_remove(r);
1702  		spin_unlock(&ls->ls_rsbtbl[b].lock);
1703  
1704  		dlm_free_rsb(r);
1705  	}
1706  }
1707  
1708  void dlm_scan_rsbs(struct dlm_ls *ls)
1709  {
1710  	int i;
1711  
1712  	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1713  		shrink_bucket(ls, i);
1714  		if (dlm_locking_stopped(ls))
1715  			break;
1716  		cond_resched();
1717  	}
1718  }
1719  
1720  /* lkb is master or local copy */
1721  
1722  static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1723  {
1724  	int b, len = r->res_ls->ls_lvblen;
1725  
1726  	/* b=1 lvb returned to caller
1727  	   b=0 lvb written to rsb or invalidated
1728  	   b=-1 do nothing */
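	/* Illustrative examples, assuming the conventional VMS-style
	   dlm_lvb_operations table in lvb_table.h: an up-conversion such
	   as NL->PR returns the rsb's lvb to the caller (b=1), while a
	   down-conversion from PW or EX, e.g. EX->NL, writes the caller's
	   lvb into the rsb (b=0). */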
1729  
1730  	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1731  
1732  	if (b == 1) {
1733  		if (!lkb->lkb_lvbptr)
1734  			return;
1735  
1736  		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1737  			return;
1738  
1739  		if (!r->res_lvbptr)
1740  			return;
1741  
1742  		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1743  		lkb->lkb_lvbseq = r->res_lvbseq;
1744  
1745  	} else if (b == 0) {
1746  		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1747  			rsb_set_flag(r, RSB_VALNOTVALID);
1748  			return;
1749  		}
1750  
1751  		if (!lkb->lkb_lvbptr)
1752  			return;
1753  
1754  		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1755  			return;
1756  
1757  		if (!r->res_lvbptr)
1758  			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1759  
1760  		if (!r->res_lvbptr)
1761  			return;
1762  
1763  		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1764  		r->res_lvbseq++;
1765  		lkb->lkb_lvbseq = r->res_lvbseq;
1766  		rsb_clear_flag(r, RSB_VALNOTVALID);
1767  	}
1768  
1769  	if (rsb_flag(r, RSB_VALNOTVALID))
1770  		set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags);
1771  }
1772  
1773  static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1774  {
1775  	if (lkb->lkb_grmode < DLM_LOCK_PW)
1776  		return;
1777  
1778  	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1779  		rsb_set_flag(r, RSB_VALNOTVALID);
1780  		return;
1781  	}
1782  
1783  	if (!lkb->lkb_lvbptr)
1784  		return;
1785  
1786  	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1787  		return;
1788  
1789  	if (!r->res_lvbptr)
1790  		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1791  
1792  	if (!r->res_lvbptr)
1793  		return;
1794  
1795  	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1796  	r->res_lvbseq++;
1797  	rsb_clear_flag(r, RSB_VALNOTVALID);
1798  }
1799  
1800  /* lkb is process copy (pc) */
1801  
1802  static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1803  			    const struct dlm_message *ms)
1804  {
1805  	int b;
1806  
1807  	if (!lkb->lkb_lvbptr)
1808  		return;
1809  
1810  	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1811  		return;
1812  
1813  	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1814  	if (b == 1) {
1815  		int len = receive_extralen(ms);
1816  		if (len > r->res_ls->ls_lvblen)
1817  			len = r->res_ls->ls_lvblen;
1818  		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1819  		lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
1820  	}
1821  }
1822  
1823  /* Manipulate lkb's on rsb's convert/granted/waiting queues
1824     remove_lock -- used for unlock, removes lkb from granted
1825     revert_lock -- used for cancel, moves lkb from convert to granted
1826     grant_lock  -- used for request and convert, adds lkb to granted or
1827                    moves lkb from convert or waiting to granted
1828  
1829     Each of these is used for master or local copy lkb's.  There is
1830     also a _pc() variation used to make the corresponding change on
1831     a process copy (pc) lkb. */
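/* For example, do_unlock() on the master calls remove_lock(), while the
   requesting node applies the same change to its process copy via
   remove_lock_pc() when the unlock reply arrives. */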
1832  
1833  static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1834  {
1835  	del_lkb(r, lkb);
1836  	lkb->lkb_grmode = DLM_LOCK_IV;
1837  	/* this unhold undoes the original ref from create_lkb()
1838  	   so this leads to the lkb being freed */
1839  	unhold_lkb(lkb);
1840  }
1841  
1842  static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1843  {
1844  	set_lvb_unlock(r, lkb);
1845  	_remove_lock(r, lkb);
1846  }
1847  
1848  static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1849  {
1850  	_remove_lock(r, lkb);
1851  }
1852  
1853  /* returns: 0 did nothing
1854  	    1 moved lock to granted
1855  	   -1 removed lock */
1856  
1857  static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1858  {
1859  	int rv = 0;
1860  
1861  	lkb->lkb_rqmode = DLM_LOCK_IV;
1862  
1863  	switch (lkb->lkb_status) {
1864  	case DLM_LKSTS_GRANTED:
1865  		break;
1866  	case DLM_LKSTS_CONVERT:
1867  		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1868  		rv = 1;
1869  		break;
1870  	case DLM_LKSTS_WAITING:
1871  		del_lkb(r, lkb);
1872  		lkb->lkb_grmode = DLM_LOCK_IV;
1873  		/* this unhold undoes the original ref from create_lkb()
1874  		   so this leads to the lkb being freed */
1875  		unhold_lkb(lkb);
1876  		rv = -1;
1877  		break;
1878  	default:
1879  		log_print("invalid status for revert %d", lkb->lkb_status);
1880  	}
1881  	return rv;
1882  }
1883  
1884  static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1885  {
1886  	return revert_lock(r, lkb);
1887  }
1888  
1889  static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1890  {
1891  	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
1892  		lkb->lkb_grmode = lkb->lkb_rqmode;
1893  		if (lkb->lkb_status)
1894  			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1895  		else
1896  			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
1897  	}
1898  
1899  	lkb->lkb_rqmode = DLM_LOCK_IV;
1900  	lkb->lkb_highbast = 0;
1901  }
1902  
1903  static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1904  {
1905  	set_lvb_lock(r, lkb);
1906  	_grant_lock(r, lkb);
1907  }
1908  
1909  static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1910  			  const struct dlm_message *ms)
1911  {
1912  	set_lvb_lock_pc(r, lkb, ms);
1913  	_grant_lock(r, lkb);
1914  }
1915  
1916  /* called by grant_pending_locks() which means an async grant message must
1917     be sent to the requesting node in addition to granting the lock if the
1918     lkb belongs to a remote node. */
1919  
1920  static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1921  {
1922  	grant_lock(r, lkb);
1923  	if (is_master_copy(lkb))
1924  		send_grant(r, lkb);
1925  	else
1926  		queue_cast(r, lkb, 0);
1927  }
1928  
1929  /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
1930     change the granted/requested modes.  We're munging things accordingly in
1931     the process copy.
1932     CONVDEADLK: our grmode may have been forced down to NL to resolve a
1933     conversion deadlock
1934     ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
1935     compatible with other granted locks */
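/* E.g. a PR->EX convert that hit a conversion deadlock may come back
   granted with grmode forced down to NL and DLM_SBF_DEMOTED set in the
   lksb flags; munge_demoted() mirrors that change in the process copy. */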
1936  
1937  static void munge_demoted(struct dlm_lkb *lkb)
1938  {
1939  	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
1940  		log_print("munge_demoted %x invalid modes gr %d rq %d",
1941  			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
1942  		return;
1943  	}
1944  
1945  	lkb->lkb_grmode = DLM_LOCK_NL;
1946  }
1947  
1948  static void munge_altmode(struct dlm_lkb *lkb, const struct dlm_message *ms)
1949  {
1950  	if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
1951  	    ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
1952  		log_print("munge_altmode %x invalid reply type %d",
1953  			  lkb->lkb_id, le32_to_cpu(ms->m_type));
1954  		return;
1955  	}
1956  
1957  	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
1958  		lkb->lkb_rqmode = DLM_LOCK_PR;
1959  	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
1960  		lkb->lkb_rqmode = DLM_LOCK_CW;
1961  	else {
1962  		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
1963  		dlm_print_lkb(lkb);
1964  	}
1965  }
1966  
1967  static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1968  {
1969  	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1970  					   lkb_statequeue);
1971  	if (lkb->lkb_id == first->lkb_id)
1972  		return 1;
1973  
1974  	return 0;
1975  }
1976  
1977  /* Check if the given lkb conflicts with another lkb on the queue. */
1978  
1979  static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1980  {
1981  	struct dlm_lkb *this;
1982  
1983  	list_for_each_entry(this, head, lkb_statequeue) {
1984  		if (this == lkb)
1985  			continue;
1986  		if (!modes_compat(this, lkb))
1987  			return 1;
1988  	}
1989  	return 0;
1990  }
1991  
1992  /*
1993   * "A conversion deadlock arises with a pair of lock requests in the converting
1994   * queue for one resource.  The granted mode of each lock blocks the requested
1995   * mode of the other lock."
1996   *
1997   * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
1998   * convert queue from being granted, then deadlk/demote lkb.
1999   *
2000   * Example:
2001   * Granted Queue: empty
2002   * Convert Queue: NL->EX (first lock)
2003   *                PR->EX (second lock)
2004   *
2005   * The first lock can't be granted because of the granted mode of the second
2006   * lock and the second lock can't be granted because it's not first in the
2007   * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2008   * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2009   * flag set and return DEMOTED in the lksb flags.
2010   *
2011   * Originally, this function detected conv-deadlk in a more limited scope:
2012   * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2013   * - if lkb1 was the first entry in the queue (not just earlier), and was
2014   *   blocked by the granted mode of lkb2, and there was nothing on the
2015   *   granted queue preventing lkb1 from being granted immediately, i.e.
2016   *   lkb2 was the only thing preventing lkb1 from being granted.
2017   *
2018   * That second condition meant we'd only say there was conv-deadlk if
2019   * resolving it (by demotion) would lead to the first lock on the convert
2020   * queue being granted right away.  It allowed conversion deadlocks to exist
2021   * between locks on the convert queue while they couldn't be granted anyway.
2022   *
2023   * Now, we detect and take action on conversion deadlocks immediately when
2024   * they're created, even if they may not be immediately consequential.  If
2025   * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2026   * mode that would prevent lkb1's conversion from being granted, we do a
2027   * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2028   * I think this means that the lkb_is_ahead condition below should always
2029   * be zero, i.e. there will never be conv-deadlk between two locks that are
2030   * both already on the convert queue.
2031   */
2032  
2033  static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2034  {
2035  	struct dlm_lkb *lkb1;
2036  	int lkb_is_ahead = 0;
2037  
2038  	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2039  		if (lkb1 == lkb2) {
2040  			lkb_is_ahead = 1;
2041  			continue;
2042  		}
2043  
2044  		if (!lkb_is_ahead) {
2045  			if (!modes_compat(lkb2, lkb1))
2046  				return 1;
2047  		} else {
2048  			if (!modes_compat(lkb2, lkb1) &&
2049  			    !modes_compat(lkb1, lkb2))
2050  				return 1;
2051  		}
2052  	}
2053  	return 0;
2054  }
2055  
2056  /*
2057   * Return 1 if the lock can be granted, 0 otherwise.
2058   * Also detect and resolve conversion deadlocks.
2059   *
2060   * lkb is the lock to be granted
2061   *
2062   * now is 1 if the function is being called in the context of the
2063   * immediate request, it is 0 if called later, after the lock has been
2064   * queued.
2065   *
2066   * recover is 1 if dlm_recover_grant() is trying to grant conversions
2067   * after recovery.
2068   *
2069   * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2070   */
2071  
2072  static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2073  			   int recover)
2074  {
2075  	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2076  
2077  	/*
2078  	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2079  	 * a new request for a NL mode lock being blocked.
2080  	 *
2081  	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2082  	 * request, then it would be granted.  In essence, the use of this flag
2083  	 * tells the Lock Manager to expedite this request by not considering
2084  	 * what may be in the CONVERTING or WAITING queues...  As of this
2085  	 * writing, the EXPEDITE flag can be used only with new requests for NL
2086  	 * mode locks.  This flag is not valid for conversion requests.
2087  	 *
2088  	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2089  	 * conversion or used with a non-NL requested mode.  We also know an
2090  	 * EXPEDITE request is always granted immediately, so now must always
2091  	 * be 1.  The full condition to grant an expedite request: (now &&
2092  	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2093  	 * therefore be shortened to just checking the flag.
2094  	 */
2095  
2096  	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2097  		return 1;
2098  
2099  	/*
2100  	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2101  	 * added to the remaining conditions.
2102  	 */
2103  
2104  	if (queue_conflict(&r->res_grantqueue, lkb))
2105  		return 0;
2106  
2107  	/*
2108  	 * 6-3: By default, a conversion request is immediately granted if the
2109  	 * requested mode is compatible with the modes of all other granted
2110  	 * locks
2111  	 */
2112  
2113  	if (queue_conflict(&r->res_convertqueue, lkb))
2114  		return 0;
2115  
2116  	/*
2117  	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2118  	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2119  	 * The lkb's may have been rebuilt on the queues in a different
2120  	 * order than they were in on the previous master.  So, granting
2121  	 * queued conversions in order after recovery doesn't make sense
2122  	 * since the order hasn't been preserved anyway.  The new order
2123  	 * could also have created a new "in place" conversion deadlock.
2124  	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2125  	 * After recovery, there would be no granted locks, and possibly
2126  	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2127  	 * recovery, grant conversions without considering order.
2128  	 */
2129  
2130  	if (conv && recover)
2131  		return 1;
2132  
2133  	/*
2134  	 * 6-5: But the default algorithm for deciding whether to grant or
2135  	 * queue conversion requests does not by itself guarantee that such
2136  	 * requests are serviced on a "first come first serve" basis.  This, in
2137  	 * turn, can lead to a phenomenon known as "indefinite postponement".
2138  	 *
2139  	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2140  	 * the system service employed to request a lock conversion.  This flag
2141  	 * forces certain conversion requests to be queued, even if they are
2142  	 * compatible with the granted modes of other locks on the same
2143  	 * resource.  Thus, the use of this flag results in conversion requests
2144  	 * being ordered on a "first come first serve" basis.
2145  	 *
2146  	 * DCT: This condition is all about new conversions being able to occur
2147  	 * "in place" while the lock remains on the granted queue (assuming
2148  	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2149  	 * doesn't _have_ to go onto the convert queue where it's processed in
2150  	 * order.  The "now" variable is necessary to distinguish converts
2151  	 * being received and processed for the first time now, because once a
2152  	 * convert is moved to the conversion queue the condition below applies
2153  	 * requiring fifo granting.
2154  	 */
2155  
2156  	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2157  		return 1;
2158  
2159  	/*
2160  	 * Even if the convert is compat with all granted locks,
2161  	 * QUECVT forces it behind other locks on the convert queue.
2162  	 */
2163  
2164  	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2165  		if (list_empty(&r->res_convertqueue))
2166  			return 1;
2167  		else
2168  			return 0;
2169  	}
2170  
2171  	/*
2172  	 * The NOORDER flag is set to avoid the standard vms rules on grant
2173  	 * order.
2174  	 */
2175  
2176  	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2177  		return 1;
2178  
2179  	/*
2180  	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2181  	 * granted until all other conversion requests ahead of it are granted
2182  	 * and/or canceled.
2183  	 */
2184  
2185  	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2186  		return 1;
2187  
2188  	/*
2189  	 * 6-4: By default, a new request is immediately granted only if all
2190  	 * three of the following conditions are satisfied when the request is
2191  	 * issued:
2192  	 * - The queue of ungranted conversion requests for the resource is
2193  	 *   empty.
2194  	 * - The queue of ungranted new requests for the resource is empty.
2195  	 * - The mode of the new request is compatible with the most
2196  	 *   restrictive mode of all granted locks on the resource.
2197  	 */
2198  
2199  	if (now && !conv && list_empty(&r->res_convertqueue) &&
2200  	    list_empty(&r->res_waitqueue))
2201  		return 1;
2202  
2203  	/*
2204  	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2205  	 * it cannot be granted until the queue of ungranted conversion
2206  	 * requests is empty, all ungranted new requests ahead of it are
2207  	 * granted and/or canceled, and it is compatible with the granted mode
2208  	 * of the most restrictive lock granted on the resource.
2209  	 */
2210  
2211  	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2212  	    first_in_list(lkb, &r->res_waitqueue))
2213  		return 1;
2214  
2215  	return 0;
2216  }
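/* A worked example of the rules above (assuming the standard VMS
 * compatibility matrix, __dlm_compat_matrix): with two PR locks granted,
 * a new CR request is granted immediately (6-4: both queues are empty
 * and CR is compatible with PR), but a PR->EX convert of one holder
 * conflicts with the other PR and is queued on the convertqueue; after
 * that, even a compatible new CR request is queued, since the
 * convertqueue is no longer empty. */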
2217  
2218  static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2219  			  int recover, int *err)
2220  {
2221  	int rv;
2222  	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2223  	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2224  
2225  	if (err)
2226  		*err = 0;
2227  
2228  	rv = _can_be_granted(r, lkb, now, recover);
2229  	if (rv)
2230  		goto out;
2231  
2232  	/*
2233  	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2234  	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2235  	 * cancels one of the locks.
2236  	 */
2237  
2238  	if (is_convert && can_be_queued(lkb) &&
2239  	    conversion_deadlock_detect(r, lkb)) {
2240  		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2241  			lkb->lkb_grmode = DLM_LOCK_NL;
2242  			set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags);
2243  		} else if (err) {
2244  			*err = -EDEADLK;
2245  		} else {
2246  			log_print("can_be_granted deadlock %x now %d",
2247  				  lkb->lkb_id, now);
2248  			dlm_dump_rsb(r);
2249  		}
2250  		goto out;
2251  	}
2252  
2253  	/*
2254  	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2255  	 * to grant a request in a mode other than the normal rqmode.  It's a
2256  	 * simple way to provide a big optimization to applications that can
2257  	 * use them.
2258  	 */
2259  
2260  	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2261  		alt = DLM_LOCK_PR;
2262  	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2263  		alt = DLM_LOCK_CW;
2264  
2265  	if (alt) {
2266  		lkb->lkb_rqmode = alt;
2267  		rv = _can_be_granted(r, lkb, now, 0);
2268  		if (rv)
2269  			set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags);
2270  		else
2271  			lkb->lkb_rqmode = rqmode;
2272  	}
2273   out:
2274  	return rv;
2275  }
2276  
2277  /* Returns the highest requested mode of all blocked conversions; sets
2278     cw if there's a blocked conversion to DLM_LOCK_CW. */
2279  
2280  static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2281  				 unsigned int *count)
2282  {
2283  	struct dlm_lkb *lkb, *s;
2284  	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2285  	int hi, demoted, quit, grant_restart, demote_restart;
2286  	int deadlk;
2287  
2288  	quit = 0;
2289   restart:
2290  	grant_restart = 0;
2291  	demote_restart = 0;
2292  	hi = DLM_LOCK_IV;
2293  
2294  	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2295  		demoted = is_demoted(lkb);
2296  		deadlk = 0;
2297  
2298  		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2299  			grant_lock_pending(r, lkb);
2300  			grant_restart = 1;
2301  			if (count)
2302  				(*count)++;
2303  			continue;
2304  		}
2305  
2306  		if (!demoted && is_demoted(lkb)) {
2307  			log_print("WARN: pending demoted %x node %d %s",
2308  				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2309  			demote_restart = 1;
2310  			continue;
2311  		}
2312  
2313  		if (deadlk) {
2314  			/*
2315  			 * If the DLM_LKF_NODLCKWT flag is set and a conversion
2316  			 * deadlock is detected, we queue a blocking AST so the
2317  			 * caller can demote (or cancel) the conversion itself.
2318  			 */
2319  			if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2320  				if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2321  					queue_bast(r, lkb, lkb->lkb_rqmode);
2322  					lkb->lkb_highbast = lkb->lkb_rqmode;
2323  				}
2324  			} else {
2325  				log_print("WARN: pending deadlock %x node %d %s",
2326  					  lkb->lkb_id, lkb->lkb_nodeid,
2327  					  r->res_name);
2328  				dlm_dump_rsb(r);
2329  			}
2330  			continue;
2331  		}
2332  
2333  		hi = max_t(int, lkb->lkb_rqmode, hi);
2334  
2335  		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2336  			*cw = 1;
2337  	}
2338  
2339  	if (grant_restart)
2340  		goto restart;
2341  	if (demote_restart && !quit) {
2342  		quit = 1;
2343  		goto restart;
2344  	}
2345  
2346  	return max_t(int, high, hi);
2347  }
2348  
2349  static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2350  			      unsigned int *count)
2351  {
2352  	struct dlm_lkb *lkb, *s;
2353  
2354  	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2355  		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2356  			grant_lock_pending(r, lkb);
2357  			if (count)
2358  				(*count)++;
2359  		} else {
2360  			high = max_t(int, lkb->lkb_rqmode, high);
2361  			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2362  				*cw = 1;
2363  		}
2364  	}
2365  
2366  	return high;
2367  }
2368  
2369  /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2370     on either the convert or waiting queue.
2371     high is the largest rqmode of all locks blocked on the convert or
2372     waiting queue. */
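/* For example, if the highest blocked rqmode (high) is PR but a CW lock
   is also blocked (cw set), a granted PR lock still needs a bast: PR does
   not block PR, but PR and CW are mutually incompatible, which is why
   grant_pending_locks() sends a DLM_LOCK_CW bast in that case. */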
2373  
2374  static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2375  {
2376  	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2377  		if (gr->lkb_highbast < DLM_LOCK_EX)
2378  			return 1;
2379  		return 0;
2380  	}
2381  
2382  	if (gr->lkb_highbast < high &&
2383  	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2384  		return 1;
2385  	return 0;
2386  }
2387  
2388  static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2389  {
2390  	struct dlm_lkb *lkb, *s;
2391  	int high = DLM_LOCK_IV;
2392  	int cw = 0;
2393  
2394  	if (!is_master(r)) {
2395  		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2396  		dlm_dump_rsb(r);
2397  		return;
2398  	}
2399  
2400  	high = grant_pending_convert(r, high, &cw, count);
2401  	high = grant_pending_wait(r, high, &cw, count);
2402  
2403  	if (high == DLM_LOCK_IV)
2404  		return;
2405  
2406  	/*
2407  	 * If there are locks left on the wait/convert queue then send blocking
2408  	 * ASTs to granted locks based on the largest requested mode (high)
2409  	 * found above.
2410  	 */
2411  
2412  	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2413  		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2414  			if (cw && high == DLM_LOCK_PR &&
2415  			    lkb->lkb_grmode == DLM_LOCK_PR)
2416  				queue_bast(r, lkb, DLM_LOCK_CW);
2417  			else
2418  				queue_bast(r, lkb, high);
2419  			lkb->lkb_highbast = high;
2420  		}
2421  	}
2422  }
2423  
2424  static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2425  {
2426  	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2427  	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2428  		if (gr->lkb_highbast < DLM_LOCK_EX)
2429  			return 1;
2430  		return 0;
2431  	}
2432  
2433  	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2434  		return 1;
2435  	return 0;
2436  }
2437  
2438  static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2439  			    struct dlm_lkb *lkb)
2440  {
2441  	struct dlm_lkb *gr;
2442  
2443  	list_for_each_entry(gr, head, lkb_statequeue) {
2444  		/* skip self when sending basts to convertqueue */
2445  		if (gr == lkb)
2446  			continue;
2447  		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2448  			queue_bast(r, gr, lkb->lkb_rqmode);
2449  			gr->lkb_highbast = lkb->lkb_rqmode;
2450  		}
2451  	}
2452  }
2453  
2454  static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2455  {
2456  	send_bast_queue(r, &r->res_grantqueue, lkb);
2457  }
2458  
2459  static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2460  {
2461  	send_bast_queue(r, &r->res_grantqueue, lkb);
2462  	send_bast_queue(r, &r->res_convertqueue, lkb);
2463  }
2464  
2465  /* set_master(r, lkb) -- set the master nodeid of a resource
2466  
2467     The purpose of this function is to set the nodeid field in the given
2468     lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2469     known, it can just be copied to the lkb and the function will return
2470     0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2471     before it can be copied to the lkb.
2472  
2473     When the rsb nodeid is being looked up remotely, the initial lkb
2474     causing the lookup is kept on the ls_waiters list waiting for the
2475     lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2476     on the rsb's res_lookup list until the master is verified.
2477  
2478     Return values:
2479     0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2480     1: the rsb master is not available and the lkb has been placed on
2481        a wait queue
2482  */
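/* E.g. the first request for an rsb with an unknown master records its
   lkid in res_first_lkid and sends a lookup to the directory node;
   further requests arriving before the lookup reply park on res_lookup
   and are restarted by process_lookup_list() once confirm_master()
   settles the master. */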
2483  
2484  static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2485  {
2486  	int our_nodeid = dlm_our_nodeid();
2487  
2488  	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2489  		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2490  		r->res_first_lkid = lkb->lkb_id;
2491  		lkb->lkb_nodeid = r->res_nodeid;
2492  		return 0;
2493  	}
2494  
2495  	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2496  		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2497  		return 1;
2498  	}
2499  
2500  	if (r->res_master_nodeid == our_nodeid) {
2501  		lkb->lkb_nodeid = 0;
2502  		return 0;
2503  	}
2504  
2505  	if (r->res_master_nodeid) {
2506  		lkb->lkb_nodeid = r->res_master_nodeid;
2507  		return 0;
2508  	}
2509  
2510  	if (dlm_dir_nodeid(r) == our_nodeid) {
2511  		/* This is a somewhat unusual case; find_rsb will usually
2512  		   have set res_master_nodeid when dir nodeid is local, but
2513  		   there are cases where we become the dir node after we've
2514  		   passed find_rsb and go through _request_lock again.
2515  		   confirm_master() or process_lookup_list() needs to be
2516  		   called after this. */
2517  		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2518  			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2519  			  r->res_name);
2520  		r->res_master_nodeid = our_nodeid;
2521  		r->res_nodeid = 0;
2522  		lkb->lkb_nodeid = 0;
2523  		return 0;
2524  	}
2525  
2526  	r->res_first_lkid = lkb->lkb_id;
2527  	send_lookup(r, lkb);
2528  	return 1;
2529  }
2530  
2531  static void process_lookup_list(struct dlm_rsb *r)
2532  {
2533  	struct dlm_lkb *lkb, *safe;
2534  
2535  	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2536  		list_del_init(&lkb->lkb_rsb_lookup);
2537  		_request_lock(r, lkb);
2538  		schedule();
2539  	}
2540  }
2541  
2542  /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2543  
2544  static void confirm_master(struct dlm_rsb *r, int error)
2545  {
2546  	struct dlm_lkb *lkb;
2547  
2548  	if (!r->res_first_lkid)
2549  		return;
2550  
2551  	switch (error) {
2552  	case 0:
2553  	case -EINPROGRESS:
2554  		r->res_first_lkid = 0;
2555  		process_lookup_list(r);
2556  		break;
2557  
2558  	case -EAGAIN:
2559  	case -EBADR:
2560  	case -ENOTBLK:
2561  		/* the remote request failed and won't be retried (it was
2562  		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2563  		   lkb the first_lkid */
2564  
2565  		r->res_first_lkid = 0;
2566  
2567  		if (!list_empty(&r->res_lookup)) {
2568  			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2569  					 lkb_rsb_lookup);
2570  			list_del_init(&lkb->lkb_rsb_lookup);
2571  			r->res_first_lkid = lkb->lkb_id;
2572  			_request_lock(r, lkb);
2573  		}
2574  		break;
2575  
2576  	default:
2577  		log_error(r->res_ls, "confirm_master unknown error %d", error);
2578  	}
2579  }
2580  
2581  static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2582  			 int namelen, void (*ast)(void *astparam),
2583  			 void *astparam,
2584  			 void (*bast)(void *astparam, int mode),
2585  			 struct dlm_args *args)
2586  {
2587  	int rv = -EINVAL;
2588  
2589  	/* check for invalid arg usage */
2590  
2591  	if (mode < 0 || mode > DLM_LOCK_EX)
2592  		goto out;
2593  
2594  	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2595  		goto out;
2596  
2597  	if (flags & DLM_LKF_CANCEL)
2598  		goto out;
2599  
2600  	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2601  		goto out;
2602  
2603  	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2604  		goto out;
2605  
2606  	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2607  		goto out;
2608  
2609  	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2610  		goto out;
2611  
2612  	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2613  		goto out;
2614  
2615  	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2616  		goto out;
2617  
2618  	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2619  		goto out;
2620  
2621  	if (!ast || !lksb)
2622  		goto out;
2623  
2624  	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2625  		goto out;
2626  
2627  	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2628  		goto out;
2629  
2630  	/* these args will be copied to the lkb in validate_lock_args;
2631  	   this cannot be done now because, when converting locks, fields
2632  	   in an active lkb cannot be modified before locking the rsb */
2633  
2634  	args->flags = flags;
2635  	args->astfn = ast;
2636  	args->astparam = astparam;
2637  	args->bastfn = bast;
2638  	args->mode = mode;
2639  	args->lksb = lksb;
2640  	rv = 0;
2641   out:
2642  	return rv;
2643  }
2644  
2645  static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2646  {
2647  	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2648   		      DLM_LKF_FORCEUNLOCK))
2649  		return -EINVAL;
2650  
2651  	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2652  		return -EINVAL;
2653  
2654  	args->flags = flags;
2655  	args->astparam = astarg;
2656  	return 0;
2657  }
2658  
2659  static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2660  			      struct dlm_args *args)
2661  {
2662  	int rv = -EBUSY;
2663  
2664  	if (args->flags & DLM_LKF_CONVERT) {
2665  		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2666  			goto out;
2667  
2668  		/* lock not allowed if there's any op in progress */
2669  		if (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count))
2670  			goto out;
2671  
2672  		if (is_overlap(lkb))
2673  			goto out;
2674  
2675  		rv = -EINVAL;
2676  		if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
2677  			goto out;
2678  
2679  		if (args->flags & DLM_LKF_QUECVT &&
2680  		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2681  			goto out;
2682  	}
2683  
2684  	lkb->lkb_exflags = args->flags;
2685  	dlm_set_sbflags_val(lkb, 0);
2686  	lkb->lkb_astfn = args->astfn;
2687  	lkb->lkb_astparam = args->astparam;
2688  	lkb->lkb_bastfn = args->bastfn;
2689  	lkb->lkb_rqmode = args->mode;
2690  	lkb->lkb_lksb = args->lksb;
2691  	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2692  	lkb->lkb_ownpid = (int) current->pid;
2693  	rv = 0;
2694   out:
2695  	switch (rv) {
2696  	case 0:
2697  		break;
2698  	case -EINVAL:
2699  		/* annoy the user because dlm usage is wrong */
2700  		WARN_ON(1);
2701  		log_error(ls, "%s %d %x %x %x %d %d", __func__,
2702  			  rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
2703  			  lkb->lkb_status, lkb->lkb_wait_type);
2704  		break;
2705  	default:
2706  		log_debug(ls, "%s %d %x %x %x %d %d", __func__,
2707  			  rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
2708  			  lkb->lkb_status, lkb->lkb_wait_type);
2709  		break;
2710  	}
2711  
2712  	return rv;
2713  }
2714  
2715  /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2716     for success */
2717  
2718  /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2719     because there may be a lookup in progress and it's valid to do
2720     cancel/unlockf on it */
2721  
2722  static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2723  {
2724  	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2725  	int rv = -EBUSY;
2726  
2727  	/* normal unlock not allowed if there's any op in progress */
2728  	if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) &&
2729  	    (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count)))
2730  		goto out;
2731  
2732  	/* an lkb may be waiting for an rsb lookup to complete where the
2733  	   lookup was initiated by another lock */
2734  
2735  	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2736  		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2737  			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2738  			list_del_init(&lkb->lkb_rsb_lookup);
2739  			queue_cast(lkb->lkb_resource, lkb,
2740  				   args->flags & DLM_LKF_CANCEL ?
2741  				   -DLM_ECANCEL : -DLM_EUNLOCK);
2742  			unhold_lkb(lkb); /* undoes create_lkb() */
2743  		}
2744  		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2745  		goto out;
2746  	}
2747  
2748  	rv = -EINVAL;
2749  	if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
2750  		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2751  		dlm_print_lkb(lkb);
2752  		goto out;
2753  	}
2754  
2755  	/* an lkb may still exist even though the lock is EOL'ed due to a
2756  	 * cancel, unlock or failed noqueue request; an app can't use these
2757  	 * locks; return same error as if the lkid had not been found at all
2758  	 */
2759  
2760  	if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
2761  		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2762  		rv = -ENOENT;
2763  		goto out;
2764  	}
2765  
2766  	/* cancel not allowed with another cancel/unlock in progress */
2767  
2768  	if (args->flags & DLM_LKF_CANCEL) {
2769  		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2770  			goto out;
2771  
2772  		if (is_overlap(lkb))
2773  			goto out;
2774  
2775  		if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) {
2776  			set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
2777  			rv = -EBUSY;
2778  			goto out;
2779  		}
2780  
2781  		/* there's nothing to cancel */
2782  		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2783  		    !lkb->lkb_wait_type) {
2784  			rv = -EBUSY;
2785  			goto out;
2786  		}
2787  
2788  		switch (lkb->lkb_wait_type) {
2789  		case DLM_MSG_LOOKUP:
2790  		case DLM_MSG_REQUEST:
2791  			set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
2792  			rv = -EBUSY;
2793  			goto out;
2794  		case DLM_MSG_UNLOCK:
2795  		case DLM_MSG_CANCEL:
2796  			goto out;
2797  		}
2798  		/* add_to_waiters() will set OVERLAP_CANCEL */
2799  		goto out_ok;
2800  	}
2801  
2802  	/* do we need to allow a force-unlock if there's a normal unlock
2803  	   already in progress?  in what conditions could the normal unlock
2804  	   fail such that we'd want to send a force-unlock to be sure? */
2805  
2806  	if (args->flags & DLM_LKF_FORCEUNLOCK) {
2807  		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2808  			goto out;
2809  
2810  		if (is_overlap_unlock(lkb))
2811  			goto out;
2812  
2813  		if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) {
2814  			set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
2815  			rv = -EBUSY;
2816  			goto out;
2817  		}
2818  
2819  		switch (lkb->lkb_wait_type) {
2820  		case DLM_MSG_LOOKUP:
2821  		case DLM_MSG_REQUEST:
2822  			set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
2823  			rv = -EBUSY;
2824  			goto out;
2825  		case DLM_MSG_UNLOCK:
2826  			goto out;
2827  		}
2828  		/* add_to_waiters() will set OVERLAP_UNLOCK */
2829  	}
2830  
2831   out_ok:
2832  	/* an overlapping op shouldn't blow away exflags from other op */
2833  	lkb->lkb_exflags |= args->flags;
2834  	dlm_set_sbflags_val(lkb, 0);
2835  	lkb->lkb_astparam = args->astparam;
2836  	rv = 0;
2837   out:
2838  	switch (rv) {
2839  	case 0:
2840  		break;
2841  	case -EINVAL:
2842  		/* annoy the user because dlm usage is wrong */
2843  		WARN_ON(1);
2844  		log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
2845  			  lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags,
2846  			  args->flags, lkb->lkb_wait_type,
2847  			  lkb->lkb_resource->res_name);
2848  		break;
2849  	default:
2850  		log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
2851  			  lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags,
2852  			  args->flags, lkb->lkb_wait_type,
2853  			  lkb->lkb_resource->res_name);
2854  		break;
2855  	}
2856  
2857  	return rv;
2858  }
2859  
2860  /*
2861   * Four stage 4 varieties:
2862   * do_request(), do_convert(), do_unlock(), do_cancel()
2863   * These are called on the master node for the given lock and
2864   * from the central locking logic.
2865   */
2866  
2867  static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2868  {
2869  	int error = 0;
2870  
2871  	if (can_be_granted(r, lkb, 1, 0, NULL)) {
2872  		grant_lock(r, lkb);
2873  		queue_cast(r, lkb, 0);
2874  		goto out;
2875  	}
2876  
2877  	if (can_be_queued(lkb)) {
2878  		error = -EINPROGRESS;
2879  		add_lkb(r, lkb, DLM_LKSTS_WAITING);
2880  		goto out;
2881  	}
2882  
2883  	error = -EAGAIN;
2884  	queue_cast(r, lkb, -EAGAIN);
2885   out:
2886  	return error;
2887  }
2888  
2889  static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2890  			       int error)
2891  {
2892  	switch (error) {
2893  	case -EAGAIN:
2894  		if (force_blocking_asts(lkb))
2895  			send_blocking_asts_all(r, lkb);
2896  		break;
2897  	case -EINPROGRESS:
2898  		send_blocking_asts(r, lkb);
2899  		break;
2900  	}
2901  }
2902  
2903  static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2904  {
2905  	int error = 0;
2906  	int deadlk = 0;
2907  
2908  	/* changing an existing lock may allow others to be granted */
2909  
2910  	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
2911  		grant_lock(r, lkb);
2912  		queue_cast(r, lkb, 0);
2913  		goto out;
2914  	}
2915  
2916  	/* can_be_granted() detected that this lock would block in a conversion
2917  	   deadlock, so we leave it on the granted queue and return EDEADLK in
2918  	   the ast for the convert. */
2919  
2920  	if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2921  		/* it's left on the granted queue */
2922  		revert_lock(r, lkb);
2923  		queue_cast(r, lkb, -EDEADLK);
2924  		error = -EDEADLK;
2925  		goto out;
2926  	}
2927  
2928  	/* is_demoted() means the can_be_granted() above set the grmode
2929  	   to NL, and left us on the granted queue.  This auto-demotion
2930  	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
2931  	   now grantable.  We have to try to grant other converting locks
2932  	   before we try again to grant this one. */
2933  
2934  	if (is_demoted(lkb)) {
2935  		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
2936  		if (_can_be_granted(r, lkb, 1, 0)) {
2937  			grant_lock(r, lkb);
2938  			queue_cast(r, lkb, 0);
2939  			goto out;
2940  		}
2941  		/* else fall through and move to convert queue */
2942  	}
2943  
2944  	if (can_be_queued(lkb)) {
2945  		error = -EINPROGRESS;
2946  		del_lkb(r, lkb);
2947  		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2948  		goto out;
2949  	}
2950  
2951  	error = -EAGAIN;
2952  	queue_cast(r, lkb, -EAGAIN);
2953   out:
2954  	return error;
2955  }
2956  
2957  static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2958  			       int error)
2959  {
2960  	switch (error) {
2961  	case 0:
2962  		grant_pending_locks(r, NULL);
2963  		/* grant_pending_locks also sends basts */
2964  		break;
2965  	case -EAGAIN:
2966  		if (force_blocking_asts(lkb))
2967  			send_blocking_asts_all(r, lkb);
2968  		break;
2969  	case -EINPROGRESS:
2970  		send_blocking_asts(r, lkb);
2971  		break;
2972  	}
2973  }
2974  
2975  static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2976  {
2977  	remove_lock(r, lkb);
2978  	queue_cast(r, lkb, -DLM_EUNLOCK);
2979  	return -DLM_EUNLOCK;
2980  }
2981  
2982  static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2983  			      int error)
2984  {
2985  	grant_pending_locks(r, NULL);
2986  }
2987  
2988  /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2989  
2990  static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2991  {
2992  	int error;
2993  
2994  	error = revert_lock(r, lkb);
2995  	if (error) {
2996  		queue_cast(r, lkb, -DLM_ECANCEL);
2997  		return -DLM_ECANCEL;
2998  	}
2999  	return 0;
3000  }
3001  
3002  static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3003  			      int error)
3004  {
3005  	if (error)
3006  		grant_pending_locks(r, NULL);
3007  }
3008  
3009  /*
3010   * Four stage 3 varieties:
3011   * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3012   */
3013  
3014  /* add a new lkb to a possibly new rsb, called by requesting process */
3015  
3016  static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3017  {
3018  	int error;
3019  
3020  	/* set_master: sets lkb nodeid from r */
3021  
3022  	error = set_master(r, lkb);
3023  	if (error < 0)
3024  		goto out;
3025  	if (error) {
3026  		error = 0;
3027  		goto out;
3028  	}
3029  
3030  	if (is_remote(r)) {
3031  		/* receive_request() calls do_request() on remote node */
3032  		error = send_request(r, lkb);
3033  	} else {
3034  		error = do_request(r, lkb);
3035  		/* for remote locks the request_reply is sent
3036  		   between do_request and do_request_effects */
3037  		do_request_effects(r, lkb, error);
3038  	}
3039   out:
3040  	return error;
3041  }
3042  
3043  /* change some property of an existing lkb, e.g. mode */
3044  
3045  static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3046  {
3047  	int error;
3048  
3049  	if (is_remote(r)) {
3050  		/* receive_convert() calls do_convert() on remote node */
3051  		error = send_convert(r, lkb);
3052  	} else {
3053  		error = do_convert(r, lkb);
3054  		/* for remote locks the convert_reply is sent
3055  		   between do_convert and do_convert_effects */
3056  		do_convert_effects(r, lkb, error);
3057  	}
3058  
3059  	return error;
3060  }
3061  
3062  /* remove an existing lkb from the granted queue */
3063  
3064  static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3065  {
3066  	int error;
3067  
3068  	if (is_remote(r)) {
3069  		/* receive_unlock() calls do_unlock() on remote node */
3070  		error = send_unlock(r, lkb);
3071  	} else {
3072  		error = do_unlock(r, lkb);
3073  		/* for remote locks the unlock_reply is sent
3074  		   between do_unlock and do_unlock_effects */
3075  		do_unlock_effects(r, lkb, error);
3076  	}
3077  
3078  	return error;
3079  }
3080  
3081  /* remove an existing lkb from the convert or wait queue */
3082  
3083  static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3084  {
3085  	int error;
3086  
3087  	if (is_remote(r)) {
3088  		/* receive_cancel() calls do_cancel() on remote node */
3089  		error = send_cancel(r, lkb);
3090  	} else {
3091  		error = do_cancel(r, lkb);
3092  		/* for remote locks the cancel_reply is sent
3093  		   between do_cancel and do_cancel_effects */
3094  		do_cancel_effects(r, lkb, error);
3095  	}
3096  
3097  	return error;
3098  }
3099  
3100  /*
3101   * Four stage 2 varieties:
3102   * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3103   */
3104  
3105  static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3106  			const void *name, int len,
3107  			struct dlm_args *args)
3108  {
3109  	struct dlm_rsb *r;
3110  	int error;
3111  
3112  	error = validate_lock_args(ls, lkb, args);
3113  	if (error)
3114  		return error;
3115  
3116  	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3117  	if (error)
3118  		return error;
3119  
3120  	lock_rsb(r);
3121  
3122  	attach_lkb(r, lkb);
3123  	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3124  
3125  	error = _request_lock(r, lkb);
3126  
3127  	unlock_rsb(r);
3128  	put_rsb(r);
3129  	return error;
3130  }
3131  
3132  static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3133  			struct dlm_args *args)
3134  {
3135  	struct dlm_rsb *r;
3136  	int error;
3137  
3138  	r = lkb->lkb_resource;
3139  
3140  	hold_rsb(r);
3141  	lock_rsb(r);
3142  
3143  	error = validate_lock_args(ls, lkb, args);
3144  	if (error)
3145  		goto out;
3146  
3147  	error = _convert_lock(r, lkb);
3148   out:
3149  	unlock_rsb(r);
3150  	put_rsb(r);
3151  	return error;
3152  }
3153  
3154  static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3155  		       struct dlm_args *args)
3156  {
3157  	struct dlm_rsb *r;
3158  	int error;
3159  
3160  	r = lkb->lkb_resource;
3161  
3162  	hold_rsb(r);
3163  	lock_rsb(r);
3164  
3165  	error = validate_unlock_args(lkb, args);
3166  	if (error)
3167  		goto out;
3168  
3169  	error = _unlock_lock(r, lkb);
3170   out:
3171  	unlock_rsb(r);
3172  	put_rsb(r);
3173  	return error;
3174  }
3175  
3176  static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3177  		       struct dlm_args *args)
3178  {
3179  	struct dlm_rsb *r;
3180  	int error;
3181  
3182  	r = lkb->lkb_resource;
3183  
3184  	hold_rsb(r);
3185  	lock_rsb(r);
3186  
3187  	error = validate_unlock_args(lkb, args);
3188  	if (error)
3189  		goto out;
3190  
3191  	error = _cancel_lock(r, lkb);
3192   out:
3193  	unlock_rsb(r);
3194  	put_rsb(r);
3195  	return error;
3196  }
3197  
3198  /*
3199   * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3200   */
3201  
3202  int dlm_lock(dlm_lockspace_t *lockspace,
3203  	     int mode,
3204  	     struct dlm_lksb *lksb,
3205  	     uint32_t flags,
3206  	     const void *name,
3207  	     unsigned int namelen,
3208  	     uint32_t parent_lkid,
3209  	     void (*ast) (void *astarg),
3210  	     void *astarg,
3211  	     void (*bast) (void *astarg, int mode))
3212  {
3213  	struct dlm_ls *ls;
3214  	struct dlm_lkb *lkb;
3215  	struct dlm_args args;
3216  	int error, convert = flags & DLM_LKF_CONVERT;
3217  
3218  	ls = dlm_find_lockspace_local(lockspace);
3219  	if (!ls)
3220  		return -EINVAL;
3221  
3222  	dlm_lock_recovery(ls);
3223  
3224  	if (convert)
3225  		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3226  	else
3227  		error = create_lkb(ls, &lkb);
3228  
3229  	if (error)
3230  		goto out;
3231  
3232  	trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
3233  
3234  	error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
3235  			      &args);
3236  	if (error)
3237  		goto out_put;
3238  
3239  	if (convert)
3240  		error = convert_lock(ls, lkb, &args);
3241  	else
3242  		error = request_lock(ls, lkb, name, namelen, &args);
3243  
3244  	if (error == -EINPROGRESS)
3245  		error = 0;
3246   out_put:
3247  	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
3248  
3249  	if (convert || error)
3250  		__put_lkb(ls, lkb);
3251  	if (error == -EAGAIN || error == -EDEADLK)
3252  		error = 0;
3253   out:
3254  	dlm_unlock_recovery(ls);
3255  	dlm_put_lockspace(ls);
3256  	return error;
3257  }
3258  
3259  int dlm_unlock(dlm_lockspace_t *lockspace,
3260  	       uint32_t lkid,
3261  	       uint32_t flags,
3262  	       struct dlm_lksb *lksb,
3263  	       void *astarg)
3264  {
3265  	struct dlm_ls *ls;
3266  	struct dlm_lkb *lkb;
3267  	struct dlm_args args;
3268  	int error;
3269  
3270  	ls = dlm_find_lockspace_local(lockspace);
3271  	if (!ls)
3272  		return -EINVAL;
3273  
3274  	dlm_lock_recovery(ls);
3275  
3276  	error = find_lkb(ls, lkid, &lkb);
3277  	if (error)
3278  		goto out;
3279  
3280  	trace_dlm_unlock_start(ls, lkb, flags);
3281  
3282  	error = set_unlock_args(flags, astarg, &args);
3283  	if (error)
3284  		goto out_put;
3285  
3286  	if (flags & DLM_LKF_CANCEL)
3287  		error = cancel_lock(ls, lkb, &args);
3288  	else
3289  		error = unlock_lock(ls, lkb, &args);
3290  
3291  	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3292  		error = 0;
3293  	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3294  		error = 0;
3295   out_put:
3296  	trace_dlm_unlock_end(ls, lkb, flags, error);
3297  
3298  	dlm_put_lkb(lkb);
3299   out:
3300  	dlm_unlock_recovery(ls);
3301  	dlm_put_lockspace(ls);
3302  	return error;
3303  }
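/*
 * Illustrative caller usage (a sketch, not part of this file): take an
 * EX lock on a named resource and release it, where "ls" is an open
 * lockspace handle and "done" is an initialized struct completion that
 * my_ast() completes.  Results are reported through the ast callback
 * and lksb.sb_status:
 *
 *	static void my_ast(void *arg) { complete(arg); }
 *
 *	struct dlm_lksb lksb = {};
 *	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5, 0,
 *			 my_ast, &done, NULL);
 *	wait_for_completion(&done);	then check lksb.sb_status
 *	error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &done);
 *	wait_for_completion(&done);	sb_status reports -DLM_EUNLOCK
 */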
3304  
3305  /*
3306   * send/receive routines for remote operations and replies
3307   *
3308   * send_args
3309   * send_common
3310   * send_request			receive_request
3311   * send_convert			receive_convert
3312   * send_unlock			receive_unlock
3313   * send_cancel			receive_cancel
3314   * send_grant			receive_grant
3315   * send_bast			receive_bast
3316   * send_lookup			receive_lookup
3317   * send_remove			receive_remove
3318   *
3319   * 				send_common_reply
3320   * receive_request_reply	send_request_reply
3321   * receive_convert_reply	send_convert_reply
3322   * receive_unlock_reply		send_unlock_reply
3323   * receive_cancel_reply		send_cancel_reply
3324   * receive_lookup_reply		send_lookup_reply
3325   */
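
/* grant, bast, remove and purge are one-way: the receiving side just queues
   a callback or updates its own state and sends nothing back (remove, as
   noted in _receive_message() below, has no reply). */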
3326  
3327  static int _create_message(struct dlm_ls *ls, int mb_len,
3328  			   int to_nodeid, int mstype,
3329  			   struct dlm_message **ms_ret,
3330  			   struct dlm_mhandle **mh_ret,
3331  			   gfp_t allocation)
3332  {
3333  	struct dlm_message *ms;
3334  	struct dlm_mhandle *mh;
3335  	char *mb;
3336  
3337  	/* dlm_midcomms_get_mhandle() gives us a message handle (mh) that we
3338  	   need to pass into dlm_midcomms_commit_mhandle() and a message
3339  	   buffer (mb) that we write our data into */
3340  
3341  	mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb);
3342  	if (!mh)
3343  		return -ENOBUFS;
3344  
3345  	ms = (struct dlm_message *) mb;
3346  
3347  	ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3348  	ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
3349  	ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
3350  	ms->m_header.h_length = cpu_to_le16(mb_len);
3351  	ms->m_header.h_cmd = DLM_MSG;
3352  
3353  	ms->m_type = cpu_to_le32(mstype);
3354  
3355  	*mh_ret = mh;
3356  	*ms_ret = ms;
3357  	return 0;
3358  }
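
/* The senders below all follow the same pattern: reserve a midcomms buffer,
   fill in the dlm_message, then commit it.  A minimal sketch (the m_* fields
   written in the middle step depend on the particular sender):

	error = _create_message(ls, mb_len, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
	if (error)
		return error;
	... fill in ms->m_* fields ...
	dlm_midcomms_commit_mhandle(mh, name, namelen);
 */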
3359  
3360  static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3361  			  int to_nodeid, int mstype,
3362  			  struct dlm_message **ms_ret,
3363  			  struct dlm_mhandle **mh_ret,
3364  			  gfp_t allocation)
3365  {
3366  	int mb_len = sizeof(struct dlm_message);
3367  
3368  	switch (mstype) {
3369  	case DLM_MSG_REQUEST:
3370  	case DLM_MSG_LOOKUP:
3371  	case DLM_MSG_REMOVE:
3372  		mb_len += r->res_length;
3373  		break;
3374  	case DLM_MSG_CONVERT:
3375  	case DLM_MSG_UNLOCK:
3376  	case DLM_MSG_REQUEST_REPLY:
3377  	case DLM_MSG_CONVERT_REPLY:
3378  	case DLM_MSG_GRANT:
3379  		if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK))
3380  			mb_len += r->res_ls->ls_lvblen;
3381  		break;
3382  	}
3383  
3384  	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3385  			       ms_ret, mh_ret, allocation);
3386  }
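
/* For example, a DLM_MSG_REQUEST for a 9-byte resource name is allocated as
   sizeof(struct dlm_message) + 9, and a DLM_MSG_CONVERT carrying an LVB as
   sizeof(struct dlm_message) + ls_lvblen; the extra bytes are filled into
   ms->m_extra by send_args() below. */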
3387  
3388  /* further lowcomms enhancements or alternate implementations may make
3389     the return value from this function useful at some point */
3390  
3391  static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
3392  			const void *name, int namelen)
3393  {
3394  	dlm_midcomms_commit_mhandle(mh, name, namelen);
3395  	return 0;
3396  }
3397  
3398  static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3399  		      struct dlm_message *ms)
3400  {
3401  	ms->m_nodeid   = cpu_to_le32(lkb->lkb_nodeid);
3402  	ms->m_pid      = cpu_to_le32(lkb->lkb_ownpid);
3403  	ms->m_lkid     = cpu_to_le32(lkb->lkb_id);
3404  	ms->m_remid    = cpu_to_le32(lkb->lkb_remid);
3405  	ms->m_exflags  = cpu_to_le32(lkb->lkb_exflags);
3406  	ms->m_sbflags  = cpu_to_le32(dlm_sbflags_val(lkb));
3407  	ms->m_flags    = cpu_to_le32(dlm_dflags_val(lkb));
3408  	ms->m_lvbseq   = cpu_to_le32(lkb->lkb_lvbseq);
3409  	ms->m_status   = cpu_to_le32(lkb->lkb_status);
3410  	ms->m_grmode   = cpu_to_le32(lkb->lkb_grmode);
3411  	ms->m_rqmode   = cpu_to_le32(lkb->lkb_rqmode);
3412  	ms->m_hash     = cpu_to_le32(r->res_hash);
3413  
3414  	/* m_result and m_bastmode are set from function args,
3415  	   not from lkb fields */
3416  
3417  	if (lkb->lkb_bastfn)
3418  		ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
3419  	if (lkb->lkb_astfn)
3420  		ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
3421  
3422  	/* compare with switch in create_message; send_remove() doesn't
3423  	   use send_args() */
3424  
3425  	switch (ms->m_type) {
3426  	case cpu_to_le32(DLM_MSG_REQUEST):
3427  	case cpu_to_le32(DLM_MSG_LOOKUP):
3428  		memcpy(ms->m_extra, r->res_name, r->res_length);
3429  		break;
3430  	case cpu_to_le32(DLM_MSG_CONVERT):
3431  	case cpu_to_le32(DLM_MSG_UNLOCK):
3432  	case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3433  	case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3434  	case cpu_to_le32(DLM_MSG_GRANT):
3435  		if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
3436  			break;
3437  		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3438  		break;
3439  	}
3440  }
3441  
3442  static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3443  {
3444  	struct dlm_message *ms;
3445  	struct dlm_mhandle *mh;
3446  	int to_nodeid, error;
3447  
3448  	to_nodeid = r->res_nodeid;
3449  
3450  	error = add_to_waiters(lkb, mstype, to_nodeid);
3451  	if (error)
3452  		return error;
3453  
3454  	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
3455  	if (error)
3456  		goto fail;
3457  
3458  	send_args(r, lkb, ms);
3459  
3460  	error = send_message(mh, ms, r->res_name, r->res_length);
3461  	if (error)
3462  		goto fail;
3463  	return 0;
3464  
3465   fail:
3466  	remove_from_waiters(lkb, msg_reply_type(mstype));
3467  	return error;
3468  }
3469  
3470  static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3471  {
3472  	return send_common(r, lkb, DLM_MSG_REQUEST);
3473  }
3474  
3475  static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3476  {
3477  	int error;
3478  
3479  	error = send_common(r, lkb, DLM_MSG_CONVERT);
3480  
3481  	/* down conversions go without a reply from the master */
3482  	if (!error && down_conversion(lkb)) {
3483  		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3484  		r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
3485  		r->res_ls->ls_local_ms.m_result = 0;
3486  		__receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true);
3487  	}
3488  
3489  	return error;
3490  }
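
/* down_conversion() (defined earlier in this file) is true when the lock is
   moving to a strictly less restrictive mode, e.g. converting EX to PR,
   which is why send_convert() above can complete such conversions locally
   with a faked reply instead of waiting for the master. */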
3491  
3492  /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3493     MASTER_UNCERTAIN to force the next request on the rsb to confirm
3494     that the master is still correct. */
3495  
3496  static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3497  {
3498  	return send_common(r, lkb, DLM_MSG_UNLOCK);
3499  }
3500  
3501  static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3502  {
3503  	return send_common(r, lkb, DLM_MSG_CANCEL);
3504  }
3505  
3506  static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3507  {
3508  	struct dlm_message *ms;
3509  	struct dlm_mhandle *mh;
3510  	int to_nodeid, error;
3511  
3512  	to_nodeid = lkb->lkb_nodeid;
3513  
3514  	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh,
3515  			       GFP_NOFS);
3516  	if (error)
3517  		goto out;
3518  
3519  	send_args(r, lkb, ms);
3520  
3521  	ms->m_result = 0;
3522  
3523  	error = send_message(mh, ms, r->res_name, r->res_length);
3524   out:
3525  	return error;
3526  }
3527  
3528  static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3529  {
3530  	struct dlm_message *ms;
3531  	struct dlm_mhandle *mh;
3532  	int to_nodeid, error;
3533  
3534  	to_nodeid = lkb->lkb_nodeid;
3535  
3536  	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh,
3537  			       GFP_NOFS);
3538  	if (error)
3539  		goto out;
3540  
3541  	send_args(r, lkb, ms);
3542  
3543  	ms->m_bastmode = cpu_to_le32(mode);
3544  
3545  	error = send_message(mh, ms, r->res_name, r->res_length);
3546   out:
3547  	return error;
3548  }
3549  
3550  static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3551  {
3552  	struct dlm_message *ms;
3553  	struct dlm_mhandle *mh;
3554  	int to_nodeid, error;
3555  
3556  	to_nodeid = dlm_dir_nodeid(r);
3557  
3558  	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3559  	if (error)
3560  		return error;
3561  
3562  	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh,
3563  			       GFP_NOFS);
3564  	if (error)
3565  		goto fail;
3566  
3567  	send_args(r, lkb, ms);
3568  
3569  	error = send_message(mh, ms, r->res_name, r->res_length);
3570  	if (error)
3571  		goto fail;
3572  	return 0;
3573  
3574   fail:
3575  	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3576  	return error;
3577  }
3578  
3579  static int send_remove(struct dlm_rsb *r)
3580  {
3581  	struct dlm_message *ms;
3582  	struct dlm_mhandle *mh;
3583  	int to_nodeid, error;
3584  
3585  	to_nodeid = dlm_dir_nodeid(r);
3586  
3587  	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh,
3588  			       GFP_ATOMIC);
3589  	if (error)
3590  		goto out;
3591  
3592  	memcpy(ms->m_extra, r->res_name, r->res_length);
3593  	ms->m_hash = cpu_to_le32(r->res_hash);
3594  
3595  	error = send_message(mh, ms, r->res_name, r->res_length);
3596   out:
3597  	return error;
3598  }
3599  
3600  static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3601  			     int mstype, int rv)
3602  {
3603  	struct dlm_message *ms;
3604  	struct dlm_mhandle *mh;
3605  	int to_nodeid, error;
3606  
3607  	to_nodeid = lkb->lkb_nodeid;
3608  
3609  	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
3610  	if (error)
3611  		goto out;
3612  
3613  	send_args(r, lkb, ms);
3614  
3615  	ms->m_result = cpu_to_le32(to_dlm_errno(rv));
3616  
3617  	error = send_message(mh, ms, r->res_name, r->res_length);
3618   out:
3619  	return error;
3620  }
3621  
3622  static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3623  {
3624  	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3625  }
3626  
3627  static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3628  {
3629  	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3630  }
3631  
3632  static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3633  {
3634  	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3635  }
3636  
3637  static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3638  {
3639  	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3640  }
3641  
3642  static int send_lookup_reply(struct dlm_ls *ls,
3643  			     const struct dlm_message *ms_in, int ret_nodeid,
3644  			     int rv)
3645  {
3646  	struct dlm_rsb *r = &ls->ls_local_rsb;
3647  	struct dlm_message *ms;
3648  	struct dlm_mhandle *mh;
3649  	int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
3650  
3651  	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh,
3652  			       GFP_NOFS);
3653  	if (error)
3654  		goto out;
3655  
3656  	ms->m_lkid = ms_in->m_lkid;
3657  	ms->m_result = cpu_to_le32(to_dlm_errno(rv));
3658  	ms->m_nodeid = cpu_to_le32(ret_nodeid);
3659  
3660  	error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
3661   out:
3662  	return error;
3663  }
3664  
3665  /* which args we save from a received message depends heavily on the type
3666     of message, unlike the send side where we can safely send everything about
3667     the lkb for any type of message */
3668  
3669  static void receive_flags(struct dlm_lkb *lkb, const struct dlm_message *ms)
3670  {
3671  	lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
3672  	dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags));
3673  	dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags));
3674  }
3675  
3676  static void receive_flags_reply(struct dlm_lkb *lkb,
3677  				const struct dlm_message *ms,
3678  				bool local)
3679  {
3680  	if (local)
3681  		return;
3682  
3683  	dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags));
3684  	dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags));
3685  }
3686  
3687  static int receive_extralen(const struct dlm_message *ms)
3688  {
3689  	return (le16_to_cpu(ms->m_header.h_length) -
3690  		sizeof(struct dlm_message));
3691  }
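
/* e.g. a received DLM_MSG_REQUEST whose h_length is
   sizeof(struct dlm_message) + 9 carries a 9-byte resource name in m_extra,
   so receive_extralen() returns 9. */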
3692  
3693  static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3694  		       const struct dlm_message *ms)
3695  {
3696  	int len;
3697  
3698  	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3699  		if (!lkb->lkb_lvbptr)
3700  			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3701  		if (!lkb->lkb_lvbptr)
3702  			return -ENOMEM;
3703  		len = receive_extralen(ms);
3704  		if (len > ls->ls_lvblen)
3705  			len = ls->ls_lvblen;
3706  		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3707  	}
3708  	return 0;
3709  }
3710  
3711  static void fake_bastfn(void *astparam, int mode)
3712  {
3713  	log_print("fake_bastfn should not be called");
3714  }
3715  
3716  static void fake_astfn(void *astparam)
3717  {
3718  	log_print("fake_astfn should not be called");
3719  }
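
/* On a master-copy lkb these placeholders only record that the owning node
   supplied a completion or blocking callback (receive_request_args() below
   sets them from ms->m_asts); the real callbacks run on the owning node, so
   a fake function actually being called indicates a bug. */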
3720  
3721  static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3722  				const struct dlm_message *ms)
3723  {
3724  	lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
3725  	lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
3726  	lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
3727  	lkb->lkb_grmode = DLM_LOCK_IV;
3728  	lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
3729  
3730  	lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
3731  	lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;
3732  
3733  	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3734  		/* lkb was just created so there won't be an lvb yet */
3735  		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3736  		if (!lkb->lkb_lvbptr)
3737  			return -ENOMEM;
3738  	}
3739  
3740  	return 0;
3741  }
3742  
3743  static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3744  				const struct dlm_message *ms)
3745  {
3746  	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3747  		return -EBUSY;
3748  
3749  	if (receive_lvb(ls, lkb, ms))
3750  		return -ENOMEM;
3751  
3752  	lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
3753  	lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
3754  
3755  	return 0;
3756  }
3757  
3758  static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3759  			       const struct dlm_message *ms)
3760  {
3761  	if (receive_lvb(ls, lkb, ms))
3762  		return -ENOMEM;
3763  	return 0;
3764  }
3765  
3766  /* We fill in the local-lkb fields with the info that send_xxxx_reply()
3767     uses to send a reply and that the remote end uses to process the reply. */
3768  
3769  static void setup_local_lkb(struct dlm_ls *ls, const struct dlm_message *ms)
3770  {
3771  	struct dlm_lkb *lkb = &ls->ls_local_lkb;
3772  	lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
3773  	lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
3774  }
3775  
3776  /* This is called after the rsb is locked so that we can safely inspect
3777     fields in the lkb. */
3778  
3779  static int validate_message(struct dlm_lkb *lkb, const struct dlm_message *ms)
3780  {
3781  	int from = le32_to_cpu(ms->m_header.h_nodeid);
3782  	int error = 0;
3783  
3784  	/* currently mixing of user/kernel locks is not supported */
3785  	if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) &&
3786  	    !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
3787  		log_error(lkb->lkb_resource->res_ls,
3788  			  "got user dlm message for a kernel lock");
3789  		error = -EINVAL;
3790  		goto out;
3791  	}
3792  
3793  	switch (ms->m_type) {
3794  	case cpu_to_le32(DLM_MSG_CONVERT):
3795  	case cpu_to_le32(DLM_MSG_UNLOCK):
3796  	case cpu_to_le32(DLM_MSG_CANCEL):
3797  		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3798  			error = -EINVAL;
3799  		break;
3800  
3801  	case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3802  	case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
3803  	case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
3804  	case cpu_to_le32(DLM_MSG_GRANT):
3805  	case cpu_to_le32(DLM_MSG_BAST):
3806  		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3807  			error = -EINVAL;
3808  		break;
3809  
3810  	case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3811  		if (!is_process_copy(lkb))
3812  			error = -EINVAL;
3813  		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3814  			error = -EINVAL;
3815  		break;
3816  
3817  	default:
3818  		error = -EINVAL;
3819  	}
3820  
3821  out:
3822  	if (error)
3823  		log_error(lkb->lkb_resource->res_ls,
3824  			  "ignore invalid message %d from %d %x %x %x %d",
3825  			  le32_to_cpu(ms->m_type), from, lkb->lkb_id,
3826  			  lkb->lkb_remid, dlm_iflags_val(lkb),
3827  			  lkb->lkb_nodeid);
3828  	return error;
3829  }
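
/* In short: operations on a master copy (convert/unlock/cancel) must come
   from the node that owns the lock, and messages delivered to a process copy
   (replies, grant, bast) must come from the current master; a request reply
   is also accepted when the master isn't known yet (lkb_nodeid == -1).
   Anything else is logged and dropped. */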
3830  
3831  static int receive_request(struct dlm_ls *ls, const struct dlm_message *ms)
3832  {
3833  	struct dlm_lkb *lkb;
3834  	struct dlm_rsb *r;
3835  	int from_nodeid;
3836  	int error, namelen = 0;
3837  
3838  	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
3839  
3840  	error = create_lkb(ls, &lkb);
3841  	if (error)
3842  		goto fail;
3843  
3844  	receive_flags(lkb, ms);
3845  	set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
3846  	error = receive_request_args(ls, lkb, ms);
3847  	if (error) {
3848  		__put_lkb(ls, lkb);
3849  		goto fail;
3850  	}
3851  
3852  	/* The dir node is the authority on whether we are the master
3853  	   for this rsb or not, so if the master sends us a request, we should
3854  	   recreate the rsb if we've destroyed it.   This race happens when we
3855  	   send a remove message to the dir node at the same time that the dir
3856  	   node sends us a request for the rsb. */
3857  
3858  	namelen = receive_extralen(ms);
3859  
3860  	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
3861  			 R_RECEIVE_REQUEST, &r);
3862  	if (error) {
3863  		__put_lkb(ls, lkb);
3864  		goto fail;
3865  	}
3866  
3867  	lock_rsb(r);
3868  
3869  	if (r->res_master_nodeid != dlm_our_nodeid()) {
3870  		error = validate_master_nodeid(ls, r, from_nodeid);
3871  		if (error) {
3872  			unlock_rsb(r);
3873  			put_rsb(r);
3874  			__put_lkb(ls, lkb);
3875  			goto fail;
3876  		}
3877  	}
3878  
3879  	attach_lkb(r, lkb);
3880  	error = do_request(r, lkb);
3881  	send_request_reply(r, lkb, error);
3882  	do_request_effects(r, lkb, error);
3883  
3884  	unlock_rsb(r);
3885  	put_rsb(r);
3886  
3887  	if (error == -EINPROGRESS)
3888  		error = 0;
3889  	if (error)
3890  		dlm_put_lkb(lkb);
3891  	return 0;
3892  
3893   fail:
3894  	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
3895  	   and do this receive_request again from process_lookup_list once
3896  	   we get the lookup reply.  This would avoid many repeated
3897  	   ENOTBLK request failures when the lookup reply designating us
3898  	   as master is delayed. */
3899  
3900  	if (error != -ENOTBLK) {
3901  		log_limit(ls, "receive_request %x from %d %d",
3902  			  le32_to_cpu(ms->m_lkid), from_nodeid, error);
3903  	}
3904  
3905  	setup_local_lkb(ls, ms);
3906  	send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
3907  	return error;
3908  }
3909  
3910  static int receive_convert(struct dlm_ls *ls, const struct dlm_message *ms)
3911  {
3912  	struct dlm_lkb *lkb;
3913  	struct dlm_rsb *r;
3914  	int error, reply = 1;
3915  
3916  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
3917  	if (error)
3918  		goto fail;
3919  
3920  	if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
3921  		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
3922  			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
3923  			  (unsigned long long)lkb->lkb_recover_seq,
3924  			  le32_to_cpu(ms->m_header.h_nodeid),
3925  			  le32_to_cpu(ms->m_lkid));
3926  		error = -ENOENT;
3927  		dlm_put_lkb(lkb);
3928  		goto fail;
3929  	}
3930  
3931  	r = lkb->lkb_resource;
3932  
3933  	hold_rsb(r);
3934  	lock_rsb(r);
3935  
3936  	error = validate_message(lkb, ms);
3937  	if (error)
3938  		goto out;
3939  
3940  	receive_flags(lkb, ms);
3941  
3942  	error = receive_convert_args(ls, lkb, ms);
3943  	if (error) {
3944  		send_convert_reply(r, lkb, error);
3945  		goto out;
3946  	}
3947  
3948  	reply = !down_conversion(lkb);
3949  
3950  	error = do_convert(r, lkb);
3951  	if (reply)
3952  		send_convert_reply(r, lkb, error);
3953  	do_convert_effects(r, lkb, error);
3954   out:
3955  	unlock_rsb(r);
3956  	put_rsb(r);
3957  	dlm_put_lkb(lkb);
3958  	return 0;
3959  
3960   fail:
3961  	setup_local_lkb(ls, ms);
3962  	send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
3963  	return error;
3964  }
3965  
3966  static int receive_unlock(struct dlm_ls *ls, const struct dlm_message *ms)
3967  {
3968  	struct dlm_lkb *lkb;
3969  	struct dlm_rsb *r;
3970  	int error;
3971  
3972  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
3973  	if (error)
3974  		goto fail;
3975  
3976  	if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
3977  		log_error(ls, "receive_unlock %x remid %x remote %d %x",
3978  			  lkb->lkb_id, lkb->lkb_remid,
3979  			  le32_to_cpu(ms->m_header.h_nodeid),
3980  			  le32_to_cpu(ms->m_lkid));
3981  		error = -ENOENT;
3982  		dlm_put_lkb(lkb);
3983  		goto fail;
3984  	}
3985  
3986  	r = lkb->lkb_resource;
3987  
3988  	hold_rsb(r);
3989  	lock_rsb(r);
3990  
3991  	error = validate_message(lkb, ms);
3992  	if (error)
3993  		goto out;
3994  
3995  	receive_flags(lkb, ms);
3996  
3997  	error = receive_unlock_args(ls, lkb, ms);
3998  	if (error) {
3999  		send_unlock_reply(r, lkb, error);
4000  		goto out;
4001  	}
4002  
4003  	error = do_unlock(r, lkb);
4004  	send_unlock_reply(r, lkb, error);
4005  	do_unlock_effects(r, lkb, error);
4006   out:
4007  	unlock_rsb(r);
4008  	put_rsb(r);
4009  	dlm_put_lkb(lkb);
4010  	return 0;
4011  
4012   fail:
4013  	setup_local_lkb(ls, ms);
4014  	send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
4015  	return error;
4016  }
4017  
4018  static int receive_cancel(struct dlm_ls *ls, const struct dlm_message *ms)
4019  {
4020  	struct dlm_lkb *lkb;
4021  	struct dlm_rsb *r;
4022  	int error;
4023  
4024  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4025  	if (error)
4026  		goto fail;
4027  
4028  	receive_flags(lkb, ms);
4029  
4030  	r = lkb->lkb_resource;
4031  
4032  	hold_rsb(r);
4033  	lock_rsb(r);
4034  
4035  	error = validate_message(lkb, ms);
4036  	if (error)
4037  		goto out;
4038  
4039  	error = do_cancel(r, lkb);
4040  	send_cancel_reply(r, lkb, error);
4041  	do_cancel_effects(r, lkb, error);
4042   out:
4043  	unlock_rsb(r);
4044  	put_rsb(r);
4045  	dlm_put_lkb(lkb);
4046  	return 0;
4047  
4048   fail:
4049  	setup_local_lkb(ls, ms);
4050  	send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
4051  	return error;
4052  }
4053  
4054  static int receive_grant(struct dlm_ls *ls, const struct dlm_message *ms)
4055  {
4056  	struct dlm_lkb *lkb;
4057  	struct dlm_rsb *r;
4058  	int error;
4059  
4060  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4061  	if (error)
4062  		return error;
4063  
4064  	r = lkb->lkb_resource;
4065  
4066  	hold_rsb(r);
4067  	lock_rsb(r);
4068  
4069  	error = validate_message(lkb, ms);
4070  	if (error)
4071  		goto out;
4072  
4073  	receive_flags_reply(lkb, ms, false);
4074  	if (is_altmode(lkb))
4075  		munge_altmode(lkb, ms);
4076  	grant_lock_pc(r, lkb, ms);
4077  	queue_cast(r, lkb, 0);
4078   out:
4079  	unlock_rsb(r);
4080  	put_rsb(r);
4081  	dlm_put_lkb(lkb);
4082  	return 0;
4083  }
4084  
4085  static int receive_bast(struct dlm_ls *ls, const struct dlm_message *ms)
4086  {
4087  	struct dlm_lkb *lkb;
4088  	struct dlm_rsb *r;
4089  	int error;
4090  
4091  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4092  	if (error)
4093  		return error;
4094  
4095  	r = lkb->lkb_resource;
4096  
4097  	hold_rsb(r);
4098  	lock_rsb(r);
4099  
4100  	error = validate_message(lkb, ms);
4101  	if (error)
4102  		goto out;
4103  
4104  	queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
4105  	lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
4106   out:
4107  	unlock_rsb(r);
4108  	put_rsb(r);
4109  	dlm_put_lkb(lkb);
4110  	return 0;
4111  }
4112  
4113  static void receive_lookup(struct dlm_ls *ls, const struct dlm_message *ms)
4114  {
4115  	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4116  
4117  	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4118  	our_nodeid = dlm_our_nodeid();
4119  
4120  	len = receive_extralen(ms);
4121  
4122  	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4123  				  &ret_nodeid, NULL);
4124  
4125  	/* Optimization: we're master so treat lookup as a request */
4126  	if (!error && ret_nodeid == our_nodeid) {
4127  		receive_request(ls, ms);
4128  		return;
4129  	}
4130  	send_lookup_reply(ls, ms, ret_nodeid, error);
4131  }
4132  
4133  static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
4134  {
4135  	char name[DLM_RESNAME_MAXLEN+1];
4136  	struct dlm_rsb *r;
4137  	uint32_t hash, b;
4138  	int rv, len, dir_nodeid, from_nodeid;
4139  
4140  	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4141  
4142  	len = receive_extralen(ms);
4143  
4144  	if (len > DLM_RESNAME_MAXLEN) {
4145  		log_error(ls, "receive_remove from %d bad len %d",
4146  			  from_nodeid, len);
4147  		return;
4148  	}
4149  
4150  	dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
4151  	if (dir_nodeid != dlm_our_nodeid()) {
4152  		log_error(ls, "receive_remove from %d bad nodeid %d",
4153  			  from_nodeid, dir_nodeid);
4154  		return;
4155  	}
4156  
4157  	/* Look for the name on rsbtbl.toss; if it's there, kill it.
4158  	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4159  	   message.  This is an expected race between the dir node sending a
4160  	   request to the master node at the same time as the master node sends
4161  	   a remove to the dir node.  The resolution to that race is for the
4162  	   dir node to ignore the remove message, and the master node to
4163  	   recreate the master rsb when it gets a request from the dir node for
4164  	   an rsb it doesn't have. */
4165  
4166  	memset(name, 0, sizeof(name));
4167  	memcpy(name, ms->m_extra, len);
4168  
4169  	hash = jhash(name, len, 0);
4170  	b = hash & (ls->ls_rsbtbl_size - 1);
4171  
4172  	spin_lock(&ls->ls_rsbtbl[b].lock);
4173  
4174  	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4175  	if (rv) {
4176  		/* verify the rsb is on keep list per comment above */
4177  		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4178  		if (rv) {
4179  			/* should not happen */
4180  			log_error(ls, "receive_remove from %d not found %s",
4181  				  from_nodeid, name);
4182  			spin_unlock(&ls->ls_rsbtbl[b].lock);
4183  			return;
4184  		}
4185  		if (r->res_master_nodeid != from_nodeid) {
4186  			/* should not happen */
4187  			log_error(ls, "receive_remove keep from %d master %d",
4188  				  from_nodeid, r->res_master_nodeid);
4189  			dlm_print_rsb(r);
4190  			spin_unlock(&ls->ls_rsbtbl[b].lock);
4191  			return;
4192  		}
4193  
4194  		log_debug(ls, "receive_remove from %d master %d first %x %s",
4195  			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4196  			  name);
4197  		spin_unlock(&ls->ls_rsbtbl[b].lock);
4198  		return;
4199  	}
4200  
4201  	if (r->res_master_nodeid != from_nodeid) {
4202  		log_error(ls, "receive_remove toss from %d master %d",
4203  			  from_nodeid, r->res_master_nodeid);
4204  		dlm_print_rsb(r);
4205  		spin_unlock(&ls->ls_rsbtbl[b].lock);
4206  		return;
4207  	}
4208  
4209  	if (kref_put(&r->res_ref, kill_rsb)) {
4210  		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4211  		spin_unlock(&ls->ls_rsbtbl[b].lock);
4212  		dlm_free_rsb(r);
4213  	} else {
4214  		log_error(ls, "receive_remove from %d rsb ref error",
4215  			  from_nodeid);
4216  		dlm_print_rsb(r);
4217  		spin_unlock(&ls->ls_rsbtbl[b].lock);
4218  	}
4219  }
4220  
4221  static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms)
4222  {
4223  	do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
4224  }
4225  
4226  static int receive_request_reply(struct dlm_ls *ls,
4227  				 const struct dlm_message *ms)
4228  {
4229  	struct dlm_lkb *lkb;
4230  	struct dlm_rsb *r;
4231  	int error, mstype, result;
4232  	int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4233  
4234  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4235  	if (error)
4236  		return error;
4237  
4238  	r = lkb->lkb_resource;
4239  	hold_rsb(r);
4240  	lock_rsb(r);
4241  
4242  	error = validate_message(lkb, ms);
4243  	if (error)
4244  		goto out;
4245  
4246  	mstype = lkb->lkb_wait_type;
4247  	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4248  	if (error) {
4249  		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4250  			  lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
4251  			  from_dlm_errno(le32_to_cpu(ms->m_result)));
4252  		dlm_dump_rsb(r);
4253  		goto out;
4254  	}
4255  
4256  	/* Optimization: the dir node was also the master, so it took our
4257  	   lookup as a request and sent request reply instead of lookup reply */
4258  	if (mstype == DLM_MSG_LOOKUP) {
4259  		r->res_master_nodeid = from_nodeid;
4260  		r->res_nodeid = from_nodeid;
4261  		lkb->lkb_nodeid = from_nodeid;
4262  	}
4263  
4264  	/* this is the value returned from do_request() on the master */
4265  	result = from_dlm_errno(le32_to_cpu(ms->m_result));
4266  
4267  	switch (result) {
4268  	case -EAGAIN:
4269  		/* request would block (be queued) on remote master */
4270  		queue_cast(r, lkb, -EAGAIN);
4271  		confirm_master(r, -EAGAIN);
4272  		unhold_lkb(lkb); /* undoes create_lkb() */
4273  		break;
4274  
4275  	case -EINPROGRESS:
4276  	case 0:
4277  		/* request was queued or granted on remote master */
4278  		receive_flags_reply(lkb, ms, false);
4279  		lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
4280  		if (is_altmode(lkb))
4281  			munge_altmode(lkb, ms);
4282  		if (result) {
4283  			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4284  		} else {
4285  			grant_lock_pc(r, lkb, ms);
4286  			queue_cast(r, lkb, 0);
4287  		}
4288  		confirm_master(r, result);
4289  		break;
4290  
4291  	case -EBADR:
4292  	case -ENOTBLK:
4293  		/* find_rsb failed to find rsb or rsb wasn't master */
4294  		log_limit(ls, "receive_request_reply %x from %d %d "
4295  			  "master %d dir %d first %x %s", lkb->lkb_id,
4296  			  from_nodeid, result, r->res_master_nodeid,
4297  			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4298  
4299  		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4300  		    r->res_master_nodeid != dlm_our_nodeid()) {
4301  			/* cause _request_lock->set_master->send_lookup */
4302  			r->res_master_nodeid = 0;
4303  			r->res_nodeid = -1;
4304  			lkb->lkb_nodeid = -1;
4305  		}
4306  
4307  		if (is_overlap(lkb)) {
4308  			/* we'll ignore error in cancel/unlock reply */
4309  			queue_cast_overlap(r, lkb);
4310  			confirm_master(r, result);
4311  			unhold_lkb(lkb); /* undoes create_lkb() */
4312  		} else {
4313  			_request_lock(r, lkb);
4314  
4315  			if (r->res_master_nodeid == dlm_our_nodeid())
4316  				confirm_master(r, 0);
4317  		}
4318  		break;
4319  
4320  	default:
4321  		log_error(ls, "receive_request_reply %x error %d",
4322  			  lkb->lkb_id, result);
4323  	}
4324  
4325  	if ((result == 0 || result == -EINPROGRESS) &&
4326  	    test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) {
4327  		log_debug(ls, "receive_request_reply %x result %d unlock",
4328  			  lkb->lkb_id, result);
4329  		clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
4330  		send_unlock(r, lkb);
4331  	} else if ((result == -EINPROGRESS) &&
4332  		   test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT,
4333  				      &lkb->lkb_iflags)) {
4334  		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4335  		clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
4336  		send_cancel(r, lkb);
4337  	} else {
4338  		clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
4339  		clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
4340  	}
4341   out:
4342  	unlock_rsb(r);
4343  	put_rsb(r);
4344  	dlm_put_lkb(lkb);
4345  	return 0;
4346  }
4347  
4348  static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4349  				    const struct dlm_message *ms, bool local)
4350  {
4351  	/* this is the value returned from do_convert() on the master */
4352  	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4353  	case -EAGAIN:
4354  		/* convert would block (be queued) on remote master */
4355  		queue_cast(r, lkb, -EAGAIN);
4356  		break;
4357  
4358  	case -EDEADLK:
4359  		receive_flags_reply(lkb, ms, local);
4360  		revert_lock_pc(r, lkb);
4361  		queue_cast(r, lkb, -EDEADLK);
4362  		break;
4363  
4364  	case -EINPROGRESS:
4365  		/* convert was queued on remote master */
4366  		receive_flags_reply(lkb, ms, local);
4367  		if (is_demoted(lkb))
4368  			munge_demoted(lkb);
4369  		del_lkb(r, lkb);
4370  		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4371  		break;
4372  
4373  	case 0:
4374  		/* convert was granted on remote master */
4375  		receive_flags_reply(lkb, ms, local);
4376  		if (is_demoted(lkb))
4377  			munge_demoted(lkb);
4378  		grant_lock_pc(r, lkb, ms);
4379  		queue_cast(r, lkb, 0);
4380  		break;
4381  
4382  	default:
4383  		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4384  			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
4385  			  le32_to_cpu(ms->m_lkid),
4386  			  from_dlm_errno(le32_to_cpu(ms->m_result)));
4387  		dlm_print_rsb(r);
4388  		dlm_print_lkb(lkb);
4389  	}
4390  }
4391  
4392  static void _receive_convert_reply(struct dlm_lkb *lkb,
4393  				   const struct dlm_message *ms, bool local)
4394  {
4395  	struct dlm_rsb *r = lkb->lkb_resource;
4396  	int error;
4397  
4398  	hold_rsb(r);
4399  	lock_rsb(r);
4400  
4401  	error = validate_message(lkb, ms);
4402  	if (error)
4403  		goto out;
4404  
4405  	/* local reply can happen with waiters_mutex held */
4406  	error = remove_from_waiters_ms(lkb, ms, local);
4407  	if (error)
4408  		goto out;
4409  
4410  	__receive_convert_reply(r, lkb, ms, local);
4411   out:
4412  	unlock_rsb(r);
4413  	put_rsb(r);
4414  }
4415  
4416  static int receive_convert_reply(struct dlm_ls *ls,
4417  				 const struct dlm_message *ms)
4418  {
4419  	struct dlm_lkb *lkb;
4420  	int error;
4421  
4422  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4423  	if (error)
4424  		return error;
4425  
4426  	_receive_convert_reply(lkb, ms, false);
4427  	dlm_put_lkb(lkb);
4428  	return 0;
4429  }
4430  
4431  static void _receive_unlock_reply(struct dlm_lkb *lkb,
4432  				  const struct dlm_message *ms, bool local)
4433  {
4434  	struct dlm_rsb *r = lkb->lkb_resource;
4435  	int error;
4436  
4437  	hold_rsb(r);
4438  	lock_rsb(r);
4439  
4440  	error = validate_message(lkb, ms);
4441  	if (error)
4442  		goto out;
4443  
4444  	/* local reply can happen with waiters_mutex held */
4445  	error = remove_from_waiters_ms(lkb, ms, local);
4446  	if (error)
4447  		goto out;
4448  
4449  	/* this is the value returned from do_unlock() on the master */
4450  
4451  	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4452  	case -DLM_EUNLOCK:
4453  		receive_flags_reply(lkb, ms, local);
4454  		remove_lock_pc(r, lkb);
4455  		queue_cast(r, lkb, -DLM_EUNLOCK);
4456  		break;
4457  	case -ENOENT:
4458  		break;
4459  	default:
4460  		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4461  			  lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
4462  	}
4463   out:
4464  	unlock_rsb(r);
4465  	put_rsb(r);
4466  }
4467  
4468  static int receive_unlock_reply(struct dlm_ls *ls,
4469  				const struct dlm_message *ms)
4470  {
4471  	struct dlm_lkb *lkb;
4472  	int error;
4473  
4474  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4475  	if (error)
4476  		return error;
4477  
4478  	_receive_unlock_reply(lkb, ms, false);
4479  	dlm_put_lkb(lkb);
4480  	return 0;
4481  }
4482  
4483  static void _receive_cancel_reply(struct dlm_lkb *lkb,
4484  				  const struct dlm_message *ms, bool local)
4485  {
4486  	struct dlm_rsb *r = lkb->lkb_resource;
4487  	int error;
4488  
4489  	hold_rsb(r);
4490  	lock_rsb(r);
4491  
4492  	error = validate_message(lkb, ms);
4493  	if (error)
4494  		goto out;
4495  
4496  	/* local reply can happen with waiters_mutex held */
4497  	error = remove_from_waiters_ms(lkb, ms, local);
4498  	if (error)
4499  		goto out;
4500  
4501  	/* this is the value returned from do_cancel() on the master */
4502  
4503  	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4504  	case -DLM_ECANCEL:
4505  		receive_flags_reply(lkb, ms, local);
4506  		revert_lock_pc(r, lkb);
4507  		queue_cast(r, lkb, -DLM_ECANCEL);
4508  		break;
4509  	case 0:
4510  		break;
4511  	default:
4512  		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4513  			  lkb->lkb_id,
4514  			  from_dlm_errno(le32_to_cpu(ms->m_result)));
4515  	}
4516   out:
4517  	unlock_rsb(r);
4518  	put_rsb(r);
4519  }
4520  
4521  static int receive_cancel_reply(struct dlm_ls *ls,
4522  				const struct dlm_message *ms)
4523  {
4524  	struct dlm_lkb *lkb;
4525  	int error;
4526  
4527  	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4528  	if (error)
4529  		return error;
4530  
4531  	_receive_cancel_reply(lkb, ms, false);
4532  	dlm_put_lkb(lkb);
4533  	return 0;
4534  }
4535  
4536  static void receive_lookup_reply(struct dlm_ls *ls,
4537  				 const struct dlm_message *ms)
4538  {
4539  	struct dlm_lkb *lkb;
4540  	struct dlm_rsb *r;
4541  	int error, ret_nodeid;
4542  	int do_lookup_list = 0;
4543  
4544  	error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
4545  	if (error) {
4546  		log_error(ls, "%s no lkid %x", __func__,
4547  			  le32_to_cpu(ms->m_lkid));
4548  		return;
4549  	}
4550  
4551  	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4552  	   FIXME: will a non-zero error ever be returned? */
4553  
4554  	r = lkb->lkb_resource;
4555  	hold_rsb(r);
4556  	lock_rsb(r);
4557  
4558  	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4559  	if (error)
4560  		goto out;
4561  
4562  	ret_nodeid = le32_to_cpu(ms->m_nodeid);
4563  
4564  	/* We sometimes receive a request from the dir node for this
4565  	   rsb before we've received the dir node's lookup_reply for it.
4566  	   The request from the dir node implies we're the master, so we set
4567  	   ourself as master in receive_request_reply, and verify here that
4568  	   we are indeed the master. */
4569  
4570  	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4571  		/* This should never happen */
4572  		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4573  			  "master %d dir %d our %d first %x %s",
4574  			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
4575  			  ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
4576  			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4577  	}
4578  
4579  	if (ret_nodeid == dlm_our_nodeid()) {
4580  		r->res_master_nodeid = ret_nodeid;
4581  		r->res_nodeid = 0;
4582  		do_lookup_list = 1;
4583  		r->res_first_lkid = 0;
4584  	} else if (ret_nodeid == -1) {
4585  		/* the remote node doesn't believe it's the dir node */
4586  		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4587  			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
4588  		r->res_master_nodeid = 0;
4589  		r->res_nodeid = -1;
4590  		lkb->lkb_nodeid = -1;
4591  	} else {
4592  		/* set_master() will set lkb_nodeid from r */
4593  		r->res_master_nodeid = ret_nodeid;
4594  		r->res_nodeid = ret_nodeid;
4595  	}
4596  
4597  	if (is_overlap(lkb)) {
4598  		log_debug(ls, "receive_lookup_reply %x unlock %x",
4599  			  lkb->lkb_id, dlm_iflags_val(lkb));
4600  		queue_cast_overlap(r, lkb);
4601  		unhold_lkb(lkb); /* undoes create_lkb() */
4602  		goto out_list;
4603  	}
4604  
4605  	_request_lock(r, lkb);
4606  
4607   out_list:
4608  	if (do_lookup_list)
4609  		process_lookup_list(r);
4610   out:
4611  	unlock_rsb(r);
4612  	put_rsb(r);
4613  	dlm_put_lkb(lkb);
4614  }
4615  
4616  static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
4617  			     uint32_t saved_seq)
4618  {
4619  	int error = 0, noent = 0;
4620  
4621  	if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) {
4622  		log_limit(ls, "receive %d from non-member %d %x %x %d",
4623  			  le32_to_cpu(ms->m_type),
4624  			  le32_to_cpu(ms->m_header.h_nodeid),
4625  			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
4626  			  from_dlm_errno(le32_to_cpu(ms->m_result)));
4627  		return;
4628  	}
4629  
4630  	switch (ms->m_type) {
4631  
4632  	/* messages sent to a master node */
4633  
4634  	case cpu_to_le32(DLM_MSG_REQUEST):
4635  		error = receive_request(ls, ms);
4636  		break;
4637  
4638  	case cpu_to_le32(DLM_MSG_CONVERT):
4639  		error = receive_convert(ls, ms);
4640  		break;
4641  
4642  	case cpu_to_le32(DLM_MSG_UNLOCK):
4643  		error = receive_unlock(ls, ms);
4644  		break;
4645  
4646  	case cpu_to_le32(DLM_MSG_CANCEL):
4647  		noent = 1;
4648  		error = receive_cancel(ls, ms);
4649  		break;
4650  
4651  	/* messages sent from a master node (replies to above) */
4652  
4653  	case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
4654  		error = receive_request_reply(ls, ms);
4655  		break;
4656  
4657  	case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
4658  		error = receive_convert_reply(ls, ms);
4659  		break;
4660  
4661  	case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
4662  		error = receive_unlock_reply(ls, ms);
4663  		break;
4664  
4665  	case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
4666  		error = receive_cancel_reply(ls, ms);
4667  		break;
4668  
4669  	/* messages sent from a master node (only two types of async msg) */
4670  
4671  	case cpu_to_le32(DLM_MSG_GRANT):
4672  		noent = 1;
4673  		error = receive_grant(ls, ms);
4674  		break;
4675  
4676  	case cpu_to_le32(DLM_MSG_BAST):
4677  		noent = 1;
4678  		error = receive_bast(ls, ms);
4679  		break;
4680  
4681  	/* messages sent to a dir node */
4682  
4683  	case cpu_to_le32(DLM_MSG_LOOKUP):
4684  		receive_lookup(ls, ms);
4685  		break;
4686  
4687  	case cpu_to_le32(DLM_MSG_REMOVE):
4688  		receive_remove(ls, ms);
4689  		break;
4690  
4691  	/* messages sent from a dir node (remove has no reply) */
4692  
4693  	case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
4694  		receive_lookup_reply(ls, ms);
4695  		break;
4696  
4697  	/* other messages */
4698  
4699  	case cpu_to_le32(DLM_MSG_PURGE):
4700  		receive_purge(ls, ms);
4701  		break;
4702  
4703  	default:
4704  		log_error(ls, "unknown message type %d",
4705  			  le32_to_cpu(ms->m_type));
4706  	}
4707  
4708  	/*
4709  	 * When checking for ENOENT, we're checking the result of
4710  	 * find_lkb(m_remid):
4711  	 *
4712  	 * The lock id referenced in the message wasn't found.  This may
4713  	 * happen in normal usage for the async messages and cancel, so
4714  	 * only use log_debug for them.
4715  	 *
4716  	 * Some errors are expected and normal.
4717  	 */
4718  
4719  	if (error == -ENOENT && noent) {
4720  		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4721  			  le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
4722  			  le32_to_cpu(ms->m_header.h_nodeid),
4723  			  le32_to_cpu(ms->m_lkid), saved_seq);
4724  	} else if (error == -ENOENT) {
4725  		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4726  			  le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
4727  			  le32_to_cpu(ms->m_header.h_nodeid),
4728  			  le32_to_cpu(ms->m_lkid), saved_seq);
4729  
4730  		if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
4731  			dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
4732  	}
4733  
4734  	if (error == -EINVAL) {
4735  		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4736  			  "saved_seq %u",
4737  			  le32_to_cpu(ms->m_type),
4738  			  le32_to_cpu(ms->m_header.h_nodeid),
4739  			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
4740  			  saved_seq);
4741  	}
4742  }
4743  
4744  /* If the lockspace is in recovery mode (locking stopped), then normal
4745     messages are saved on the requestqueue for processing after recovery is
4746     done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4747     messages off the requestqueue before we process new ones. This occurs right
4748     after recovery completes when we transition from saving all messages on
4749     requestqueue, to processing all the saved messages, to processing new
4750     messages as they arrive. */
4751  
4752  static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
4753  				int nodeid)
4754  {
4755  	if (dlm_locking_stopped(ls)) {
4756  		/* If we were a member of this lockspace, left, and rejoined,
4757  		   other nodes may still be sending us messages from the
4758  		   lockspace generation before we left. */
4759  		if (WARN_ON_ONCE(!ls->ls_generation)) {
4760  			log_limit(ls, "receive %d from %d ignore old gen",
4761  				  le32_to_cpu(ms->m_type), nodeid);
4762  			return;
4763  		}
4764  
4765  		dlm_add_requestqueue(ls, nodeid, ms);
4766  	} else {
4767  		dlm_wait_requestqueue(ls);
4768  		_receive_message(ls, ms, 0);
4769  	}
4770  }
4771  
4772  /* This is called by dlm_recoverd to process messages that were saved on
4773     the requestqueue. */
4774  
4775  void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
4776  			       uint32_t saved_seq)
4777  {
4778  	_receive_message(ls, ms, saved_seq);
4779  }
4780  
4781  /* This is called by the midcomms layer when something is received for
4782     the lockspace.  It could be either a MSG (normal message sent as part of
4783     standard locking activity) or an RCOM (recovery message sent as part of
4784     lockspace recovery). */
4785  
4786  void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
4787  {
4788  	const struct dlm_header *hd = &p->header;
4789  	struct dlm_ls *ls;
4790  	int type = 0;
4791  
4792  	switch (hd->h_cmd) {
4793  	case DLM_MSG:
4794  		type = le32_to_cpu(p->message.m_type);
4795  		break;
4796  	case DLM_RCOM:
4797  		type = le32_to_cpu(p->rcom.rc_type);
4798  		break;
4799  	default:
4800  		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
4801  		return;
4802  	}
4803  
4804  	if (le32_to_cpu(hd->h_nodeid) != nodeid) {
4805  		log_print("invalid h_nodeid %d from %d lockspace %x",
4806  			  le32_to_cpu(hd->h_nodeid), nodeid,
4807  			  le32_to_cpu(hd->u.h_lockspace));
4808  		return;
4809  	}
4810  
4811  	ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
4812  	if (!ls) {
4813  		if (dlm_config.ci_log_debug) {
4814  			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
4815  				"%u from %d cmd %d type %d\n",
4816  				le32_to_cpu(hd->u.h_lockspace), nodeid,
4817  				hd->h_cmd, type);
4818  		}
4819  
4820  		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
4821  			dlm_send_ls_not_ready(nodeid, &p->rcom);
4822  		return;
4823  	}
4824  
4825  	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
4826  	   be inactive (in this ls) before transitioning to recovery mode */
4827  
4828  	down_read(&ls->ls_recv_active);
4829  	if (hd->h_cmd == DLM_MSG)
4830  		dlm_receive_message(ls, &p->message, nodeid);
4831  	else if (hd->h_cmd == DLM_RCOM)
4832  		dlm_receive_rcom(ls, &p->rcom, nodeid);
4833  	else
4834  		log_error(ls, "invalid h_cmd %d from %d lockspace %x",
4835  			  hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
4836  	up_read(&ls->ls_recv_active);
4837  
4838  	dlm_put_lockspace(ls);
4839  }
4840  
4841  static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
4842  				   struct dlm_message *ms_local)
4843  {
4844  	if (middle_conversion(lkb)) {
4845  		hold_lkb(lkb);
4846  		memset(ms_local, 0, sizeof(struct dlm_message));
4847  		ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
4848  		ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
4849  		ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
4850  		_receive_convert_reply(lkb, ms_local, true);
4851  
4852  		/* Same special case as in receive_rcom_lock_args() */
4853  		lkb->lkb_grmode = DLM_LOCK_IV;
4854  		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
4855  		unhold_lkb(lkb);
4856  
4857  	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
4858  		set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
4859  	}
4860  
4861  	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
4862  	   conversions are async; there's no reply from the remote master */
4863  }
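
/* middle_conversion() (defined earlier in this file) covers the PR <-> CW
   conversions, where neither mode is strictly stronger than the other, so
   this node cannot tell whether the failed master had granted or only queued
   the convert; faking an -EINPROGRESS reply and setting RSB_RECOVER_CONVERT
   above lets the later recovery steps resolve the lock's true state. */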
4864  
4865  /* A waiting lkb needs recovery if the master node has failed, or
4866     the master node is changing (only when no directory is used) */
4867  
4868  static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
4869  				 int dir_nodeid)
4870  {
4871  	if (dlm_no_directory(ls))
4872  		return 1;
4873  
4874  	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
4875  		return 1;
4876  
4877  	return 0;
4878  }
4879  
4880  /* Recovery for locks that are waiting for replies from nodes that are now
4881     gone.  We can just complete unlocks and cancels by faking a reply from the
4882     dead node.  Requests and up-conversions we flag to be resent after
4883     recovery.  Down-conversions can just be completed with a fake reply like
4884     unlocks.  Conversions between PR and CW need special attention. */
4885  
4886  void dlm_recover_waiters_pre(struct dlm_ls *ls)
4887  {
4888  	struct dlm_lkb *lkb, *safe;
4889  	struct dlm_message *ms_local;
4890  	int wait_type, local_unlock_result, local_cancel_result;
4891  	int dir_nodeid;
4892  
4893  	ms_local = kmalloc(sizeof(*ms_local), GFP_KERNEL);
4894  	if (!ms_local)
4895  		return;
4896  
4897  	mutex_lock(&ls->ls_waiters_mutex);
4898  
4899  	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
4900  
4901  		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
4902  
4903  		/* exclude debug messages about unlocks because there can be so
4904  		   many and they aren't very interesting */
4905  
4906  		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
4907  			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
4908  				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
4909  				  lkb->lkb_id,
4910  				  lkb->lkb_remid,
4911  				  lkb->lkb_wait_type,
4912  				  lkb->lkb_resource->res_nodeid,
4913  				  lkb->lkb_nodeid,
4914  				  lkb->lkb_wait_nodeid,
4915  				  dir_nodeid);
4916  		}
4917  
4918  		/* all outstanding lookups, regardless of destination, will be
4919  		   resent after recovery is done */
4920  
4921  		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
4922  			set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
4923  			continue;
4924  		}
4925  
4926  		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
4927  			continue;
4928  
4929  		wait_type = lkb->lkb_wait_type;
4930  		local_unlock_result = -DLM_EUNLOCK;
4931  		local_cancel_result = -DLM_ECANCEL;
4932  
4933  		/* Main reply may have been received leaving a zero wait_type,
4934  		   but a reply for the overlapping op may not have been
4935  		   received.  In that case we need to fake the appropriate
4936  		   reply for the overlap op. */
4937  
4938  		if (!wait_type) {
4939  			if (is_overlap_cancel(lkb)) {
4940  				wait_type = DLM_MSG_CANCEL;
4941  				if (lkb->lkb_grmode == DLM_LOCK_IV)
4942  					local_cancel_result = 0;
4943  			}
4944  			if (is_overlap_unlock(lkb)) {
4945  				wait_type = DLM_MSG_UNLOCK;
4946  				if (lkb->lkb_grmode == DLM_LOCK_IV)
4947  					local_unlock_result = -ENOENT;
4948  			}
4949  
4950  			log_debug(ls, "rwpre overlap %x %x %d %d %d",
4951  				  lkb->lkb_id, dlm_iflags_val(lkb), wait_type,
4952  				  local_cancel_result, local_unlock_result);
4953  		}
4954  
4955  		switch (wait_type) {
4956  
4957  		case DLM_MSG_REQUEST:
4958  			set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
4959  			break;
4960  
4961  		case DLM_MSG_CONVERT:
4962  			recover_convert_waiter(ls, lkb, ms_local);
4963  			break;
4964  
4965  		case DLM_MSG_UNLOCK:
4966  			hold_lkb(lkb);
4967  			memset(ms_local, 0, sizeof(struct dlm_message));
4968  			ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
4969  			ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result));
4970  			ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
4971  			_receive_unlock_reply(lkb, ms_local, true);
4972  			dlm_put_lkb(lkb);
4973  			break;
4974  
4975  		case DLM_MSG_CANCEL:
4976  			hold_lkb(lkb);
4977  			memset(ms_local, 0, sizeof(struct dlm_message));
4978  			ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
4979  			ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result));
4980  			ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
4981  			_receive_cancel_reply(lkb, ms_local, true);
4982  			dlm_put_lkb(lkb);
4983  			break;
4984  
4985  		default:
4986  			log_error(ls, "invalid lkb wait_type %d %d",
4987  				  lkb->lkb_wait_type, wait_type);
4988  		}
4989  		schedule();
4990  	}
4991  	mutex_unlock(&ls->ls_waiters_mutex);
4992  	kfree(ms_local);
4993  }
4994  
4995  static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
4996  {
4997  	struct dlm_lkb *lkb = NULL, *iter;
4998  
4999  	mutex_lock(&ls->ls_waiters_mutex);
5000  	list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
5001  		if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
5002  			hold_lkb(iter);
5003  			lkb = iter;
5004  			break;
5005  		}
5006  	}
5007  	mutex_unlock(&ls->ls_waiters_mutex);
5008  
5009  	return lkb;
5010  }
5011  
5012  /* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5013     master or dir-node for r.  Processing the lkb may result in it being placed
5014     back on waiters. */
5015  
5016  /* We do this after normal locking has been enabled and any saved messages
5017     (in requestqueue) have been processed.  We should be confident that at
5018     this point we won't get or process a reply to any of these waiting
5019     operations.  But, new ops may be coming in on the rsbs/locks here from
5020     userspace or remotely. */
5021  
5022  /* there may have been an overlap unlock/cancel prior to recovery or after
5023     recovery.  if before, the lkb may still have a positive wait_count; if after,
5024     the overlap flag would just have been set and nothing new sent.  we can be
5025     confident here that any replies to either the initial op or overlap ops
5026     prior to recovery have been received. */
5027  
5028  int dlm_recover_waiters_post(struct dlm_ls *ls)
5029  {
5030  	struct dlm_lkb *lkb;
5031  	struct dlm_rsb *r;
5032  	int error = 0, mstype, err, oc, ou;
5033  
5034  	while (1) {
5035  		if (dlm_locking_stopped(ls)) {
5036  			log_debug(ls, "recover_waiters_post aborted");
5037  			error = -EINTR;
5038  			break;
5039  		}
5040  
5041  		lkb = find_resend_waiter(ls);
5042  		if (!lkb)
5043  			break;
5044  
5045  		r = lkb->lkb_resource;
5046  		hold_rsb(r);
5047  		lock_rsb(r);
5048  
5049  		mstype = lkb->lkb_wait_type;
5050  		oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT,
5051  					&lkb->lkb_iflags);
5052  		ou = test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT,
5053  					&lkb->lkb_iflags);
5054  		err = 0;
5055  
5056  		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5057  			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5058  			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5059  			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5060  			  dlm_dir_nodeid(r), oc, ou);
5061  
5062  		/* At this point we assume that we won't get a reply to any
5063  		   previous op or overlap op on this lock.  First, do a big
5064  		   remove_from_waiters() for all previous ops. */
5065  
5066  		clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
5067  		lkb->lkb_wait_type = 0;
5068  		/* drop all wait_count references; we still
5069  		 * hold a reference for this iteration.
5070  		 */
5071  		while (!atomic_dec_and_test(&lkb->lkb_wait_count))
5072  			unhold_lkb(lkb);
5073  
5074  		mutex_lock(&ls->ls_waiters_mutex);
5075  		list_del_init(&lkb->lkb_wait_reply);
5076  		mutex_unlock(&ls->ls_waiters_mutex);
5077  
5078  		if (oc || ou) {
5079  			/* do an unlock or cancel instead of resending */
5080  			switch (mstype) {
5081  			case DLM_MSG_LOOKUP:
5082  			case DLM_MSG_REQUEST:
5083  				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5084  							-DLM_ECANCEL);
5085  				unhold_lkb(lkb); /* undoes create_lkb() */
5086  				break;
5087  			case DLM_MSG_CONVERT:
5088  				if (oc) {
5089  					queue_cast(r, lkb, -DLM_ECANCEL);
5090  				} else {
5091  					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5092  					_unlock_lock(r, lkb);
5093  				}
5094  				break;
5095  			default:
5096  				err = 1;
5097  			}
5098  		} else {
5099  			switch (mstype) {
5100  			case DLM_MSG_LOOKUP:
5101  			case DLM_MSG_REQUEST:
5102  				_request_lock(r, lkb);
5103  				if (is_master(r))
5104  					confirm_master(r, 0);
5105  				break;
5106  			case DLM_MSG_CONVERT:
5107  				_convert_lock(r, lkb);
5108  				break;
5109  			default:
5110  				err = 1;
5111  			}
5112  		}
5113  
5114  		if (err) {
5115  			log_error(ls, "waiter %x msg %d r_nodeid %d "
5116  				  "dir_nodeid %d overlap %d %d",
5117  				  lkb->lkb_id, mstype, r->res_nodeid,
5118  				  dlm_dir_nodeid(r), oc, ou);
5119  		}
5120  		unlock_rsb(r);
5121  		put_rsb(r);
5122  		dlm_put_lkb(lkb);
5123  	}
5124  
5125  	return error;
5126  }
5127  
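/* Drop master-copy lkbs from one rsb queue, except those rebuilt during the
   current recovery sequence. */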
5128  static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5129  			      struct list_head *list)
5130  {
5131  	struct dlm_lkb *lkb, *safe;
5132  
5133  	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5134  		if (!is_master_copy(lkb))
5135  			continue;
5136  
5137  		/* don't purge lkbs we've added in recover_master_copy for
5138  		   the current recovery seq */
5139  
5140  		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5141  			continue;
5142  
5143  		del_lkb(r, lkb);
5144  
5145  		/* this put should free the lkb */
5146  		if (!dlm_put_lkb(lkb))
5147  			log_error(ls, "purged mstcpy lkb not released");
5148  	}
5149  }
5150  
5151  void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5152  {
5153  	struct dlm_ls *ls = r->res_ls;
5154  
5155  	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5156  	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5157  	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5158  }
5159  
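/* Remove master-copy lkbs held by departed nodes from one rsb queue.  If a
   departed node held the lock in PW or EX mode with a VALBLK, flag the rsb so
   the lvb gets invalidated; also flag it RECOVER_GRANT so blocked locks are
   retried after the purge. */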
5160  static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5161  			    struct list_head *list,
5162  			    int nodeid_gone, unsigned int *count)
5163  {
5164  	struct dlm_lkb *lkb, *safe;
5165  
5166  	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5167  		if (!is_master_copy(lkb))
5168  			continue;
5169  
5170  		if ((lkb->lkb_nodeid == nodeid_gone) ||
5171  		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5172  
5173  			/* tell recover_lvb to invalidate the lvb
5174  			   because a node holding EX/PW failed */
5175  			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5176  			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5177  				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5178  			}
5179  
5180  			del_lkb(r, lkb);
5181  
5182  			/* this put should free the lkb */
5183  			if (!dlm_put_lkb(lkb))
5184  				log_error(ls, "purged dead lkb not released");
5185  
5186  			rsb_set_flag(r, RSB_RECOVER_GRANT);
5187  
5188  			(*count)++;
5189  		}
5190  	}
5191  }
5192  
5193  /* Get rid of locks held by nodes that are gone. */
5194  
5195  void dlm_recover_purge(struct dlm_ls *ls)
5196  {
5197  	struct dlm_rsb *r;
5198  	struct dlm_member *memb;
5199  	int nodes_count = 0;
5200  	int nodeid_gone = 0;
5201  	unsigned int lkb_count = 0;
5202  
5203  	/* cache one removed nodeid to optimize the common
5204  	   case of a single node removed */
5205  
5206  	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5207  		nodes_count++;
5208  		nodeid_gone = memb->nodeid;
5209  	}
5210  
5211  	if (!nodes_count)
5212  		return;
5213  
5214  	down_write(&ls->ls_root_sem);
5215  	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5216  		hold_rsb(r);
5217  		lock_rsb(r);
5218  		if (is_master(r)) {
5219  			purge_dead_list(ls, r, &r->res_grantqueue,
5220  					nodeid_gone, &lkb_count);
5221  			purge_dead_list(ls, r, &r->res_convertqueue,
5222  					nodeid_gone, &lkb_count);
5223  			purge_dead_list(ls, r, &r->res_waitqueue,
5224  					nodeid_gone, &lkb_count);
5225  		}
5226  		unlock_rsb(r);
5227  		unhold_rsb(r);
5228  		cond_resched();
5229  	}
5230  	up_write(&ls->ls_root_sem);
5231  
5232  	if (lkb_count)
5233  		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5234  			  lkb_count, nodes_count);
5235  }
5236  
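/* Find the next rsb in the given hash bucket that is flagged RECOVER_GRANT
   and that we master; return it held, or NULL when the bucket is exhausted.
   Non-master rsbs have the flag cleared in passing. */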
5237  static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5238  {
5239  	struct rb_node *n;
5240  	struct dlm_rsb *r;
5241  
5242  	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5243  	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5244  		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5245  
5246  		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5247  			continue;
5248  		if (!is_master(r)) {
5249  			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5250  			continue;
5251  		}
5252  		hold_rsb(r);
5253  		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5254  		return r;
5255  	}
5256  	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5257  	return NULL;
5258  }
5259  
5260  /*
5261   * Attempt to grant locks on resources that we are the master of.
5262   * Locks may have become grantable during recovery because locks
5263   * from departed nodes have been purged (or not rebuilt), allowing
5264   * previously blocked locks to now be granted.  The subset of rsb's
5265   * we are interested in are those with lkb's on either the convert or
5266   * waiting queues.
5267   *
5268   * Simplest would be to go through each master rsb and check for non-empty
5269   * convert or waiting queues, and attempt to grant on those rsbs.
5270   * Checking the queues requires lock_rsb, though, for which we'd need
5271   * to release the rsbtbl lock.  This would make iterating through all
5272   * rsb's very inefficient.  So, we rely on earlier recovery routines
5273   * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5274   * locks for.
5275   */
5276  
5277  void dlm_recover_grant(struct dlm_ls *ls)
5278  {
5279  	struct dlm_rsb *r;
5280  	int bucket = 0;
5281  	unsigned int count = 0;
5282  	unsigned int rsb_count = 0;
5283  	unsigned int lkb_count = 0;
5284  
5285  	while (1) {
5286  		r = find_grant_rsb(ls, bucket);
5287  		if (!r) {
5288  			if (bucket == ls->ls_rsbtbl_size - 1)
5289  				break;
5290  			bucket++;
5291  			continue;
5292  		}
5293  		rsb_count++;
5294  		count = 0;
5295  		lock_rsb(r);
5296  		/* the RECOVER_GRANT flag is checked in the grant path */
5297  		grant_pending_locks(r, &count);
5298  		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5299  		lkb_count += count;
5300  		confirm_master(r, 0);
5301  		unlock_rsb(r);
5302  		put_rsb(r);
5303  		cond_resched();
5304  	}
5305  
5306  	if (lkb_count)
5307  		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5308  			  lkb_count, rsb_count);
5309  }
5310  
5311  static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5312  					 uint32_t remid)
5313  {
5314  	struct dlm_lkb *lkb;
5315  
5316  	list_for_each_entry(lkb, head, lkb_statequeue) {
5317  		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5318  			return lkb;
5319  	}
5320  	return NULL;
5321  }
5322  
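/* Check all three rsb queues for an lkb already created for the given
   remote nodeid/lkid pair (it may exist from an earlier, aborted recovery). */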
5323  static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5324  				    uint32_t remid)
5325  {
5326  	struct dlm_lkb *lkb;
5327  
5328  	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5329  	if (lkb)
5330  		return lkb;
5331  	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5332  	if (lkb)
5333  		return lkb;
5334  	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5335  	if (lkb)
5336  		return lkb;
5337  	return NULL;
5338  }
5339  
5340  /* needs at least dlm_rcom + rcom_lock */
5341  static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5342  				  struct dlm_rsb *r, const struct dlm_rcom *rc)
5343  {
5344  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5345  
5346  	lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
5347  	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5348  	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5349  	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5350  	dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags));
5351  	set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
5352  	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5353  	lkb->lkb_rqmode = rl->rl_rqmode;
5354  	lkb->lkb_grmode = rl->rl_grmode;
5355  	/* don't set lkb_status because add_lkb wants to itself */
5356  	/* don't set lkb_status because add_lkb wants to do that itself */
5357  	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5358  	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5359  
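	/* any lvb data follows the fixed-size rcom_lock within the rcom
	   buffer; its length is whatever remains of the message */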
5360  	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5361  		int lvblen = le16_to_cpu(rc->rc_header.h_length) -
5362  			sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
5363  		if (lvblen > ls->ls_lvblen)
5364  			return -EINVAL;
5365  		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5366  		if (!lkb->lkb_lvbptr)
5367  			return -ENOMEM;
5368  		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5369  	}
5370  
5371  	/* Conversions between PR and CW (middle modes) need special handling.
5372  	   The real granted mode of these converting locks cannot be determined
5373  	   until all locks have been rebuilt on the rsb (recover_conversion) */
5374  
5375  	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5376  	    middle_conversion(lkb)) {
5377  		rl->rl_status = DLM_LKSTS_CONVERT;
5378  		lkb->lkb_grmode = DLM_LOCK_IV;
5379  		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5380  	}
5381  
5382  	return 0;
5383  }
5384  
5385  /* This lkb may have been recovered in a previous aborted recovery so we need
5386     to check if the rsb already has an lkb with the given remote nodeid/lkid.
5387     If so we just send back a standard reply.  If not, we create a new lkb with
5388     the given values and send back our lkid.  We send back our lkid by sending
5389     back the rcom_lock struct we got but with the remid field filled in. */
5390  
5391  /* needs at least dlm_rcom + rcom_lock */
5392  int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
5393  			    __le32 *rl_remid, __le32 *rl_result)
5394  {
5395  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5396  	struct dlm_rsb *r;
5397  	struct dlm_lkb *lkb;
5398  	uint32_t remid = 0;
5399  	int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
5400  	int error;
5401  
5402  	/* init rl_remid with rcom lock rl_remid */
5403  	*rl_remid = rl->rl_remid;
5404  
5405  	if (rl->rl_parent_lkid) {
5406  		error = -EOPNOTSUPP;
5407  		goto out;
5408  	}
5409  
5410  	remid = le32_to_cpu(rl->rl_lkid);
5411  
5412  	/* In general we expect the rsb returned to be R_MASTER, but we don't
5413  	   have to require it.  Recovery of masters on one node can overlap
5414  	   recovery of locks on another node, so one node can send us MSTCPY
5415  	   locks before we've made ourselves master of this rsb.  We can still
5416  	   add new MSTCPY locks that we receive here without any harm; when
5417  	   we make ourselves master, dlm_recover_masters() won't touch the
5418  	   MSTCPY locks we've received early. */
5419  
5420  	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5421  			 from_nodeid, R_RECEIVE_RECOVER, &r);
5422  	if (error)
5423  		goto out;
5424  
5425  	lock_rsb(r);
5426  
5427  	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5428  		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5429  			  from_nodeid, remid);
5430  		error = -EBADR;
5431  		goto out_unlock;
5432  	}
5433  
5434  	lkb = search_remid(r, from_nodeid, remid);
5435  	if (lkb) {
5436  		error = -EEXIST;
5437  		goto out_remid;
5438  	}
5439  
5440  	error = create_lkb(ls, &lkb);
5441  	if (error)
5442  		goto out_unlock;
5443  
5444  	error = receive_rcom_lock_args(ls, lkb, r, rc);
5445  	if (error) {
5446  		__put_lkb(ls, lkb);
5447  		goto out_unlock;
5448  	}
5449  
5450  	attach_lkb(r, lkb);
5451  	add_lkb(r, lkb, rl->rl_status);
5452  	ls->ls_recover_locks_in++;
5453  
5454  	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5455  		rsb_set_flag(r, RSB_RECOVER_GRANT);
5456  
5457   out_remid:
5458  	/* this is the new value returned to the lock holder for
5459  	   saving in its process-copy lkb */
5460  	*rl_remid = cpu_to_le32(lkb->lkb_id);
5461  
5462  	lkb->lkb_recover_seq = ls->ls_recover_seq;
5463  
5464   out_unlock:
5465  	unlock_rsb(r);
5466  	put_rsb(r);
5467   out:
5468  	if (error && error != -EEXIST)
5469  		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5470  			  from_nodeid, remid, error);
5471  	*rl_result = cpu_to_le32(error);
5472  	return error;
5473  }
5474  
5475  /* needs at least dlm_rcom + rcom_lock */
5476  int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
5477  			     uint64_t seq)
5478  {
5479  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5480  	struct dlm_rsb *r;
5481  	struct dlm_lkb *lkb;
5482  	uint32_t lkid, remid;
5483  	int error, result;
5484  
5485  	lkid = le32_to_cpu(rl->rl_lkid);
5486  	remid = le32_to_cpu(rl->rl_remid);
5487  	result = le32_to_cpu(rl->rl_result);
5488  
5489  	error = find_lkb(ls, lkid, &lkb);
5490  	if (error) {
5491  		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5492  			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5493  			  result);
5494  		return error;
5495  	}
5496  
5497  	r = lkb->lkb_resource;
5498  	hold_rsb(r);
5499  	lock_rsb(r);
5500  
5501  	if (!is_process_copy(lkb)) {
5502  		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5503  			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5504  			  result);
5505  		dlm_dump_rsb(r);
5506  		unlock_rsb(r);
5507  		put_rsb(r);
5508  		dlm_put_lkb(lkb);
5509  		return -EINVAL;
5510  	}
5511  
5512  	switch (result) {
5513  	case -EBADR:
5514  		/* There's a chance the new master received our lock before
5515  		   dlm_recover_master_reply(), this wouldn't happen if we did
5516  		   dlm_recover_master_reply(); this wouldn't happen if we did
5517  
5518  		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5519  			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5520  			  result);
5521  
5522  		dlm_send_rcom_lock(r, lkb, seq);
5523  		goto out;
5524  	case -EEXIST:
5525  	case 0:
5526  		lkb->lkb_remid = remid;
5527  		break;
5528  	default:
5529  		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5530  			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5531  			  result);
5532  	}
5533  
5534  	/* an ack for dlm_recover_locks() which waits for replies from
5535  	   all the locks it sends to new masters */
5536  	dlm_recovered_lock(r);
5537   out:
5538  	unlock_rsb(r);
5539  	put_rsb(r);
5540  	dlm_put_lkb(lkb);
5541  
5542  	return 0;
5543  }
5544  
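/* The dlm_user_* functions below perform lock operations on behalf of
   userspace, driven from the dlm device interface (user.c).  The
   dlm_user_args attached to each lkb carries the user's lksb and callback
   details, and is freed along with the lkb. */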
5545  int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5546  		     int mode, uint32_t flags, void *name, unsigned int namelen)
5547  {
5548  	struct dlm_lkb *lkb;
5549  	struct dlm_args args;
5550  	bool do_put = true;
5551  	int error;
5552  
5553  	dlm_lock_recovery(ls);
5554  
5555  	error = create_lkb(ls, &lkb);
5556  	if (error) {
5557  		kfree(ua);
5558  		goto out;
5559  	}
5560  
5561  	trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
5562  
5563  	if (flags & DLM_LKF_VALBLK) {
5564  		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5565  		if (!ua->lksb.sb_lvbptr) {
5566  			kfree(ua);
5567  			error = -ENOMEM;
5568  			goto out_put;
5569  		}
5570  	}
5571  	error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
5572  			      fake_bastfn, &args);
5573  	if (error) {
5574  		kfree(ua->lksb.sb_lvbptr);
5575  		ua->lksb.sb_lvbptr = NULL;
5576  		kfree(ua);
5577  		goto out_put;
5578  	}
5579  
5580  	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5581  	   When DLM_DFL_USER_BIT is set, the dlm knows that this is a userspace
5582  	   lock and that lkb_astparam is the dlm_user_args structure. */
5583  	set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags);
5584  	error = request_lock(ls, lkb, name, namelen, &args);
5585  
5586  	switch (error) {
5587  	case 0:
5588  		break;
5589  	case -EINPROGRESS:
5590  		error = 0;
5591  		break;
5592  	case -EAGAIN:
5593  		error = 0;
5594  		fallthrough;
5595  	default:
5596  		goto out_put;
5597  	}
5598  
5599  	/* add this new lkb to the per-process list of locks */
5600  	spin_lock(&ua->proc->locks_spin);
5601  	hold_lkb(lkb);
5602  	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5603  	spin_unlock(&ua->proc->locks_spin);
5604  	do_put = false;
5605   out_put:
5606  	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
5607  	if (do_put)
5608  		__put_lkb(ls, lkb);
5609   out:
5610  	dlm_unlock_recovery(ls);
5611  	return error;
5612  }
5613  
5614  int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5615  		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
5616  {
5617  	struct dlm_lkb *lkb;
5618  	struct dlm_args args;
5619  	struct dlm_user_args *ua;
5620  	int error;
5621  
5622  	dlm_lock_recovery(ls);
5623  
5624  	error = find_lkb(ls, lkid, &lkb);
5625  	if (error)
5626  		goto out;
5627  
5628  	trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
5629  
5630  	/* user can change the params on its lock when it converts it, or
5631  	   add an lvb that didn't exist before */
5632  
5633  	ua = lkb->lkb_ua;
5634  
5635  	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5636  		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5637  		if (!ua->lksb.sb_lvbptr) {
5638  			error = -ENOMEM;
5639  			goto out_put;
5640  		}
5641  	}
5642  	if (lvb_in && ua->lksb.sb_lvbptr)
5643  		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5644  
5645  	ua->xid = ua_tmp->xid;
5646  	ua->castparam = ua_tmp->castparam;
5647  	ua->castaddr = ua_tmp->castaddr;
5648  	ua->bastparam = ua_tmp->bastparam;
5649  	ua->bastaddr = ua_tmp->bastaddr;
5650  	ua->user_lksb = ua_tmp->user_lksb;
5651  
5652  	error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
5653  			      fake_bastfn, &args);
5654  	if (error)
5655  		goto out_put;
5656  
5657  	error = convert_lock(ls, lkb, &args);
5658  
5659  	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5660  		error = 0;
5661   out_put:
5662  	trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
5663  	dlm_put_lkb(lkb);
5664   out:
5665  	dlm_unlock_recovery(ls);
5666  	kfree(ua_tmp);
5667  	return error;
5668  }
5669  
5670  /*
5671   * The caller asks for an orphan lock on a given resource with a given mode.
5672   * If a matching lock exists, it's moved to the owner's list of locks and
5673   * the lkid is returned.
5674   */
5675  
5676  int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5677  		     int mode, uint32_t flags, void *name, unsigned int namelen,
5678  		     uint32_t *lkid)
5679  {
5680  	struct dlm_lkb *lkb = NULL, *iter;
5681  	struct dlm_user_args *ua;
5682  	int found_other_mode = 0;
5683  	int rv = 0;
5684  
5685  	mutex_lock(&ls->ls_orphans_mutex);
5686  	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
5687  		if (iter->lkb_resource->res_length != namelen)
5688  			continue;
5689  		if (memcmp(iter->lkb_resource->res_name, name, namelen))
5690  			continue;
5691  		if (iter->lkb_grmode != mode) {
5692  			found_other_mode = 1;
5693  			continue;
5694  		}
5695  
5696  		lkb = iter;
5697  		list_del_init(&iter->lkb_ownqueue);
5698  		clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags);
5699  		*lkid = iter->lkb_id;
5700  		break;
5701  	}
5702  	mutex_unlock(&ls->ls_orphans_mutex);
5703  
5704  	if (!lkb && found_other_mode) {
5705  		rv = -EAGAIN;
5706  		goto out;
5707  	}
5708  
5709  	if (!lkb) {
5710  		rv = -ENOENT;
5711  		goto out;
5712  	}
5713  
5714  	lkb->lkb_exflags = flags;
5715  	lkb->lkb_ownpid = (int) current->pid;
5716  
5717  	ua = lkb->lkb_ua;
5718  
5719  	ua->proc = ua_tmp->proc;
5720  	ua->xid = ua_tmp->xid;
5721  	ua->castparam = ua_tmp->castparam;
5722  	ua->castaddr = ua_tmp->castaddr;
5723  	ua->bastparam = ua_tmp->bastparam;
5724  	ua->bastaddr = ua_tmp->bastaddr;
5725  	ua->user_lksb = ua_tmp->user_lksb;
5726  
5727  	/*
5728  	 * The lkb reference from the ls_orphans list was not
5729  	 * removed above, and is now considered the reference
5730  	 * for the proc locks list.
5731  	 */
5732  
5733  	spin_lock(&ua->proc->locks_spin);
5734  	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5735  	spin_unlock(&ua->proc->locks_spin);
5736   out:
5737  	kfree(ua_tmp);
5738  	return rv;
5739  }
5740  
5741  int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5742  		    uint32_t flags, uint32_t lkid, char *lvb_in)
5743  {
5744  	struct dlm_lkb *lkb;
5745  	struct dlm_args args;
5746  	struct dlm_user_args *ua;
5747  	int error;
5748  
5749  	dlm_lock_recovery(ls);
5750  
5751  	error = find_lkb(ls, lkid, &lkb);
5752  	if (error)
5753  		goto out;
5754  
5755  	trace_dlm_unlock_start(ls, lkb, flags);
5756  
5757  	ua = lkb->lkb_ua;
5758  
5759  	if (lvb_in && ua->lksb.sb_lvbptr)
5760  		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5761  	if (ua_tmp->castparam)
5762  		ua->castparam = ua_tmp->castparam;
5763  	ua->user_lksb = ua_tmp->user_lksb;
5764  
5765  	error = set_unlock_args(flags, ua, &args);
5766  	if (error)
5767  		goto out_put;
5768  
5769  	error = unlock_lock(ls, lkb, &args);
5770  
5771  	if (error == -DLM_EUNLOCK)
5772  		error = 0;
5773  	/* from validate_unlock_args() */
5774  	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
5775  		error = 0;
5776  	if (error)
5777  		goto out_put;
5778  
5779  	spin_lock(&ua->proc->locks_spin);
5780  	/* dlm_user_add_cb() may have already taken lkb off the proc list */
5781  	if (!list_empty(&lkb->lkb_ownqueue))
5782  		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
5783  	spin_unlock(&ua->proc->locks_spin);
5784   out_put:
5785  	trace_dlm_unlock_end(ls, lkb, flags, error);
5786  	dlm_put_lkb(lkb);
5787   out:
5788  	dlm_unlock_recovery(ls);
5789  	kfree(ua_tmp);
5790  	return error;
5791  }
5792  
5793  int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5794  		    uint32_t flags, uint32_t lkid)
5795  {
5796  	struct dlm_lkb *lkb;
5797  	struct dlm_args args;
5798  	struct dlm_user_args *ua;
5799  	int error;
5800  
5801  	dlm_lock_recovery(ls);
5802  
5803  	error = find_lkb(ls, lkid, &lkb);
5804  	if (error)
5805  		goto out;
5806  
5807  	trace_dlm_unlock_start(ls, lkb, flags);
5808  
5809  	ua = lkb->lkb_ua;
5810  	if (ua_tmp->castparam)
5811  		ua->castparam = ua_tmp->castparam;
5812  	ua->user_lksb = ua_tmp->user_lksb;
5813  
5814  	error = set_unlock_args(flags, ua, &args);
5815  	if (error)
5816  		goto out_put;
5817  
5818  	error = cancel_lock(ls, lkb, &args);
5819  
5820  	if (error == -DLM_ECANCEL)
5821  		error = 0;
5822  	/* from validate_unlock_args() */
5823  	if (error == -EBUSY)
5824  		error = 0;
5825   out_put:
5826  	trace_dlm_unlock_end(ls, lkb, flags, error);
5827  	dlm_put_lkb(lkb);
5828   out:
5829  	dlm_unlock_recovery(ls);
5830  	kfree(ua_tmp);
5831  	return error;
5832  }
5833  
5834  int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
5835  {
5836  	struct dlm_lkb *lkb;
5837  	struct dlm_args args;
5838  	struct dlm_user_args *ua;
5839  	struct dlm_rsb *r;
5840  	int error;
5841  
5842  	dlm_lock_recovery(ls);
5843  
5844  	error = find_lkb(ls, lkid, &lkb);
5845  	if (error)
5846  		goto out;
5847  
5848  	trace_dlm_unlock_start(ls, lkb, flags);
5849  
5850  	ua = lkb->lkb_ua;
5851  
5852  	error = set_unlock_args(flags, ua, &args);
5853  	if (error)
5854  		goto out_put;
5855  
5856  	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
5857  
5858  	r = lkb->lkb_resource;
5859  	hold_rsb(r);
5860  	lock_rsb(r);
5861  
5862  	error = validate_unlock_args(lkb, &args);
5863  	if (error)
5864  		goto out_r;
5865  	set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags);
5866  
5867  	error = _cancel_lock(r, lkb);
5868   out_r:
5869  	unlock_rsb(r);
5870  	put_rsb(r);
5871  
5872  	if (error == -DLM_ECANCEL)
5873  		error = 0;
5874  	/* from validate_unlock_args() */
5875  	if (error == -EBUSY)
5876  		error = 0;
5877   out_put:
5878  	trace_dlm_unlock_end(ls, lkb, flags, error);
5879  	dlm_put_lkb(lkb);
5880   out:
5881  	dlm_unlock_recovery(ls);
5882  	return error;
5883  }
5884  
5885  /* lkb's that are removed from the waiters list by revert are just left on the
5886     orphans list with the granted orphan locks, to be freed by purge */
5887  
5888  static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
5889  {
5890  	struct dlm_args args;
5891  	int error;
5892  
5893  	hold_lkb(lkb); /* reference for the ls_orphans list */
5894  	mutex_lock(&ls->ls_orphans_mutex);
5895  	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
5896  	mutex_unlock(&ls->ls_orphans_mutex);
5897  
5898  	set_unlock_args(0, lkb->lkb_ua, &args);
5899  
5900  	error = cancel_lock(ls, lkb, &args);
5901  	if (error == -DLM_ECANCEL)
5902  		error = 0;
5903  	return error;
5904  }
5905  
5906  /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
5907     granted.  Regardless of what rsb queue the lock is on, it's removed and
5908     freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
5909     if our lock is PW/EX (it's ignored if our granted mode is smaller). */
5910  
5911  static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
5912  {
5913  	struct dlm_args args;
5914  	int error;
5915  
5916  	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
5917  			lkb->lkb_ua, &args);
5918  
5919  	error = unlock_lock(ls, lkb, &args);
5920  	if (error == -DLM_EUNLOCK)
5921  		error = 0;
5922  	return error;
5923  }
5924  
5925  /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
5926     (which does lock_rsb) due to deadlock with receiving a message that does
5927     lock_rsb followed by dlm_user_add_cb() */
5928  
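/* Take the next lkb off the process's locks list under ls_clear_proc_locks,
   marking it orphaned (persistent locks) or dead before returning it. */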
5929  static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
5930  				     struct dlm_user_proc *proc)
5931  {
5932  	struct dlm_lkb *lkb = NULL;
5933  
5934  	spin_lock(&ls->ls_clear_proc_locks);
5935  	if (list_empty(&proc->locks))
5936  		goto out;
5937  
5938  	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
5939  	list_del_init(&lkb->lkb_ownqueue);
5940  
5941  	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5942  		set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags);
5943  	else
5944  		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
5945   out:
5946  	spin_unlock(&ls->ls_clear_proc_locks);
5947  	return lkb;
5948  }
5949  
5950  /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
5951     1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
5952     which we clear here. */
5953  
5954  /* proc CLOSING flag is set so no more device_reads should look at proc->asts
5955     list, and no more device_writes should add lkb's to proc->locks list; so we
5956     shouldn't need to take asts_spin or locks_spin here.  this assumes that
5957     device reads/writes/closes are serialized -- FIXME: we may need to serialize
5958     them ourselves. */
5959  
5960  void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5961  {
5962  	struct dlm_lkb *lkb, *safe;
5963  
5964  	dlm_lock_recovery(ls);
5965  
5966  	while (1) {
5967  		lkb = del_proc_lock(ls, proc);
5968  		if (!lkb)
5969  			break;
5970  		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5971  			orphan_proc_lock(ls, lkb);
5972  		else
5973  			unlock_proc_lock(ls, lkb);
5974  
5975  		/* this removes the reference for the proc->locks list
5976  		   added by dlm_user_request, it may result in the lkb
5977  		   being freed */
5978  
5979  		dlm_put_lkb(lkb);
5980  	}
5981  
5982  	spin_lock(&ls->ls_clear_proc_locks);
5983  
5984  	/* in-progress unlocks */
5985  	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5986  		list_del_init(&lkb->lkb_ownqueue);
5987  		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
5988  		dlm_put_lkb(lkb);
5989  	}
5990  
5991  	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5992  		dlm_purge_lkb_callbacks(lkb);
5993  		list_del_init(&lkb->lkb_cb_list);
5994  		dlm_put_lkb(lkb);
5995  	}
5996  
5997  	spin_unlock(&ls->ls_clear_proc_locks);
5998  	dlm_unlock_recovery(ls);
5999  }
6000  
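/* Forcibly drop everything a process still holds: each lock on its locks
   list is marked dead and force-unlocked, in-progress unlocks are released,
   and any pending callbacks are discarded. */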
6001  static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6002  {
6003  	struct dlm_lkb *lkb, *safe;
6004  
6005  	while (1) {
6006  		lkb = NULL;
6007  		spin_lock(&proc->locks_spin);
6008  		if (!list_empty(&proc->locks)) {
6009  			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6010  					 lkb_ownqueue);
6011  			list_del_init(&lkb->lkb_ownqueue);
6012  		}
6013  		spin_unlock(&proc->locks_spin);
6014  
6015  		if (!lkb)
6016  			break;
6017  
6018  		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
6019  		unlock_proc_lock(ls, lkb);
6020  		dlm_put_lkb(lkb); /* ref from proc->locks list */
6021  	}
6022  
6023  	spin_lock(&proc->locks_spin);
6024  	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6025  		list_del_init(&lkb->lkb_ownqueue);
6026  		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
6027  		dlm_put_lkb(lkb);
6028  	}
6029  	spin_unlock(&proc->locks_spin);
6030  
6031  	spin_lock(&proc->asts_spin);
6032  	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6033  		dlm_purge_lkb_callbacks(lkb);
6034  		list_del_init(&lkb->lkb_cb_list);
6035  		dlm_put_lkb(lkb);
6036  	}
6037  	spin_unlock(&proc->asts_spin);
6038  }
6039  
6040  /* pid of 0 means purge all orphans */
6041  
6042  static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6043  {
6044  	struct dlm_lkb *lkb, *safe;
6045  
6046  	mutex_lock(&ls->ls_orphans_mutex);
6047  	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6048  		if (pid && lkb->lkb_ownpid != pid)
6049  			continue;
6050  		unlock_proc_lock(ls, lkb);
6051  		list_del_init(&lkb->lkb_ownqueue);
6052  		dlm_put_lkb(lkb);
6053  	}
6054  	mutex_unlock(&ls->ls_orphans_mutex);
6055  }
6056  
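/* Ask another node to purge orphan locks owned by the given pid by sending
   it a DLM_MSG_PURGE message; the receiving node handles it with do_purge(). */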
6057  static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6058  {
6059  	struct dlm_message *ms;
6060  	struct dlm_mhandle *mh;
6061  	int error;
6062  
6063  	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6064  				DLM_MSG_PURGE, &ms, &mh, GFP_NOFS);
6065  	if (error)
6066  		return error;
6067  	ms->m_nodeid = cpu_to_le32(nodeid);
6068  	ms->m_pid = cpu_to_le32(pid);
6069  
6070  	return send_message(mh, ms, NULL, 0);
6071  }
6072  
6073  int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6074  		   int nodeid, int pid)
6075  {
6076  	int error = 0;
6077  
6078  	if (nodeid && (nodeid != dlm_our_nodeid())) {
6079  		error = send_purge(ls, nodeid, pid);
6080  	} else {
6081  		dlm_lock_recovery(ls);
6082  		if (pid == current->pid)
6083  			purge_proc_locks(ls, proc);
6084  		else
6085  			do_purge(ls, nodeid, pid);
6086  		dlm_unlock_recovery(ls);
6087  	}
6088  	return error;
6089  }
6090  
6091  /* debug functionality */
6092  int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
6093  		      int lkb_nodeid, unsigned int lkb_dflags, int lkb_status)
6094  {
6095  	struct dlm_lksb *lksb;
6096  	struct dlm_lkb *lkb;
6097  	struct dlm_rsb *r;
6098  	int error;
6099  
6100  	/* we currently can't set a valid user lock */
6101  	if (lkb_dflags & BIT(DLM_DFL_USER_BIT))
6102  		return -EOPNOTSUPP;
6103  
6104  	lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
6105  	if (!lksb)
6106  		return -ENOMEM;
6107  
6108  	error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1);
6109  	if (error) {
6110  		kfree(lksb);
6111  		return error;
6112  	}
6113  
6114  	dlm_set_dflags_val(lkb, lkb_dflags);
6115  	lkb->lkb_nodeid = lkb_nodeid;
6116  	lkb->lkb_lksb = lksb;
6117  	/* user specific pointer, just don't have it NULL for kernel locks */
6118  	/* user-specific pointer; just don't leave it NULL for kernel locks */
6119  		lkb->lkb_astparam = (void *)0xDEADBEEF;
6120  
6121  	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
6122  	if (error) {
6123  		kfree(lksb);
6124  		__put_lkb(ls, lkb);
6125  		return error;
6126  	}
6127  
6128  	lock_rsb(r);
6129  	attach_lkb(r, lkb);
6130  	add_lkb(r, lkb, lkb_status);
6131  	unlock_rsb(r);
6132  	put_rsb(r);
6133  
6134  	return 0;
6135  }
6136  
6137  int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
6138  				 int mstype, int to_nodeid)
6139  {
6140  	struct dlm_lkb *lkb;
6141  	int error;
6142  
6143  	error = find_lkb(ls, lkb_id, &lkb);
6144  	if (error)
6145  		return error;
6146  
6147  	error = add_to_waiters(lkb, mstype, to_nodeid);
6148  	dlm_put_lkb(lkb);
6149  	return error;
6150  }
6151  
6152