// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

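/* kref release function: frees the callback once its last reference is dropped */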
void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}

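/*
 * Replace the callback pointed to by *from with to, dropping the
 * reference held on the old callback and taking a new reference on
 * the new one.  Either pointer may be NULL.
 */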
void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}

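/*
 * Throw away all callbacks still queued on the lkb, clear
 * DLM_IFL_CB_PENDING and invalidate the cached last cast/bast state.
 */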
void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
{
	struct dlm_callback *cb, *safe;

	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
		list_del(&cb->list);
		kref_put(&cb->ref, dlm_release_callback);
	}

	lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;

	/* invalidate */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
	lkb->lkb_last_bast_mode = -1;
}

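/*
 * Queue a new cast/bast callback on the lkb.  Redundant basts are
 * filtered out: a bast whose mode is compatible with the last granted
 * (cast) mode is skipped, as is one that directly follows a bast for
 * the same or a more restrictive mode.  Returns
 * DLM_ENQUEUE_CALLBACK_NEED_SCHED when the lkb had no callbacks
 * pending and delivery must be scheduled by the caller,
 * DLM_ENQUEUE_CALLBACK_FAILURE when the callback cannot be allocated,
 * otherwise DLM_ENQUEUE_CALLBACK_SUCCESS.
 */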
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

 out:
	return rv;
}

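/*
 * Take the oldest undelivered callback off the lkb's queue.  Returns
 * DLM_DEQUEUE_CALLBACK_EMPTY if nothing was queued,
 * DLM_DEQUEUE_CALLBACK_LAST if the removed callback was the only one,
 * otherwise DLM_DEQUEUE_CALLBACK_SUCCESS.
 */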
int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* oldest undelivered cb is callbacks first entry */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from callbacks so the others shift down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}

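/*
 * Entry point for delivering an ast/bast to a lock.  User-space locks
 * are handed to dlm_user_add_ast().  For kernel locks the callback is
 * queued on the lkb and, when delivery needs to be scheduled, an extra
 * lkb reference is taken and the lkb is either queued on the callback
 * workqueue or, while LSFL_CB_DELAY is set, parked on ls_cb_delay
 * until dlm_callback_resume().
 */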
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}

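/*
 * Workqueue function: delivers every queued callback for one lkb in
 * order, invoking the lock's castfn/bastfn outside lkb_cb_lock,
 * clears DLM_IFL_CB_PENDING once the queue drains, then drops the lkb
 * reference taken when the work was scheduled.
 */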
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		goto out;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

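/* create the lockspace's callback workqueue */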
int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

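/* destroy the callback workqueue if it was created */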
void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

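/*
 * Pause callback delivery (e.g. during lockspace recovery): set
 * LSFL_CB_DELAY so newly scheduled lkbs are parked on ls_cb_delay
 * instead of the workqueue, then flush work that is already queued.
 */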
void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
}

#define MAX_CB_QUEUE 25

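/*
 * Resume callback delivery: move the lkbs parked on ls_cb_delay back
 * onto the callback workqueue in batches of MAX_CB_QUEUE, calling
 * cond_resched() between batches, and clear LSFL_CB_DELAY once the
 * delay list is empty.
 */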
void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!ls->ls_callback_wq)
		return;

more:
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}