ast.c @ 27d3994ebb5cea9c26f52064a3da8b0e606a8d11 (old) vs. ast.c @ 61bed0baa4dba17dd06cdfe20481a580718d6c7c (new)
Both revisions carry the same license banner:

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

ast.c @ 27d3994ebb5c (old) includes:

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

ast.c @ 61bed0baa4db (new) additionally pulls in "memory.h", used for dlm_allocate_cb()/dlm_free_cb() below:

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"
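The core of the change is how pending callbacks are stored per lock (lkb): the old revision fills a fixed lkb_callbacks[DLM_CALLBACKS_SIZE] array ordered by a global sequence counter, while the new revision keeps a list of individually allocated, kref-counted dlm_callback entries and caches the last cast and last queued callback by pointer. The sketch below is only inferred from the code in this file; the authoritative definitions live in dlm_internal.h (not part of this diff) and differ in detail.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/types.h>

/* Rough, hypothetical reconstruction of the per-lkb callback state,
 * inferred from how this file uses it. For orientation only. */
#ifdef OLD_27d3994ebb5c
struct dlm_callback {
	uint64_t	seq;		/* global ordering, from dlm_cb_seq */
	uint32_t	flags;		/* DLM_CB_CAST, DLM_CB_BAST, DLM_CB_SKIP */
	int		mode;
	int		sb_status;	/* copied to the lksb on a cast */
	uint8_t		sb_flags;
};
/* struct dlm_lkb then embeds:
 *	struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE];
 *	struct dlm_callback lkb_last_cast, lkb_last_bast;
 */
#else /* NEW_61bed0baa4db */
struct dlm_callback {
	uint32_t	flags;		/* DLM_CB_CAST or DLM_CB_BAST */
	int		mode;
	int		sb_status;
	uint8_t		sb_flags;
	struct list_head list;		/* linked on lkb->lkb_callbacks */
	struct kref	ref;		/* released via dlm_release_callback() */
};
/* struct dlm_lkb then carries:
 *	struct list_head lkb_callbacks;
 *	struct dlm_callback *lkb_last_cast, *lkb_last_cb;
 *	int lkb_last_bast_mode;
 */
#endif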
ast.c @ 27d3994ebb5c (old):

static uint64_t dlm_cb_seq;
static DEFINE_SPINLOCK(dlm_cb_seq_spin);

static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
	int i;

	log_print("last_bast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_bast.seq,
		  lkb->lkb_last_bast.flags,
		  lkb->lkb_last_bast.mode,
		  lkb->lkb_last_bast.sb_status,
		  lkb->lkb_last_bast.sb_flags);

	log_print("last_cast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_cast.seq,
		  lkb->lkb_last_cast.flags,
		  lkb->lkb_last_cast.mode,
		  lkb->lkb_last_cast.sb_status,
		  lkb->lkb_last_cast.sb_flags);

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		log_print("cb %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id,
			  (unsigned long long)lkb->lkb_callbacks[i].seq,
			  lkb->lkb_callbacks[i].flags,
			  lkb->lkb_callbacks[i].mode,
			  lkb->lkb_callbacks[i].sb_status,
			  lkb->lkb_callbacks[i].sb_flags);
	}
}

ast.c @ 61bed0baa4db (new):

void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}

void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}

void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
{
	struct dlm_callback *cb, *safe;

	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
		list_del(&cb->list);
		kref_put(&cb->ref, dlm_release_callback);
	}

	/* TODO */
	lkb->lkb_flags &= ~DLM_IFL_NEED_SCHED;

	/* invalidate */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
	lkb->lkb_last_bast_mode = -1;
}
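dlm_callback_set_last_ptr() hands a reference from the old cached pointer to the new one: the previous target loses a kref (and may be freed through dlm_release_callback()), the new target gains one, and the pointer is updated. A minimal usage sketch with a hypothetical helper name, not code from ast.c:

/* Hypothetical helper illustrating the reference hand-off only. */
static void example_retarget_last_cast(struct dlm_lkb *lkb,
				       struct dlm_callback *cb)
{
	/* drop the ref held on the previous lkb_last_cast, take one on cb */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	/* passing NULL just releases the cached reference; this is how
	 * dlm_purge_lkb_callbacks() invalidates lkb_last_cast/lkb_last_cb */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
}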
ast.c @ 27d3994ebb5c (old):

int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			 int status, uint32_t sbflags, uint64_t seq)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t prev_seq;
	int prev_mode;
	int i, rv;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (lkb->lkb_callbacks[i].seq)
			continue;

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */

		if ((i > 0) && (flags & DLM_CB_BAST) &&
		    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

			prev_seq = lkb->lkb_callbacks[i-1].seq;
			prev_mode = lkb->lkb_callbacks[i-1].mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

				log_debug(ls, "skip %x add bast %llu mode %d "
					  "for bast %llu mode %d",
					  lkb->lkb_id,
					  (unsigned long long)seq,
					  mode,
					  (unsigned long long)prev_seq,
					  prev_mode);
				rv = 0;
				goto out;
			}
		}

		lkb->lkb_callbacks[i].seq = seq;
		lkb->lkb_callbacks[i].flags = flags;
		lkb->lkb_callbacks[i].mode = mode;
		lkb->lkb_callbacks[i].sb_status = status;
		lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
		rv = 0;
		break;
	}

	if (i == DLM_CALLBACKS_SIZE) {
		log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id, (unsigned long long)seq,
			  flags, mode, status, sbflags);
		dlm_dump_lkb_callbacks(lkb);
		rv = -1;
		goto out;
	}
 out:
	return rv;
}

ast.c @ 61bed0baa4db (new):

int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!(lkb->lkb_flags & DLM_IFL_NEED_SCHED)) {
		lkb->lkb_flags |= DLM_IFL_NEED_SCHED;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

 out:
	return rv;
}
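Both versions apply the same two bast-suppression rules; the new one simply reads them off the cached lkb_last_cast/lkb_last_cb pointers instead of the array. The hypothetical helper below restates those rules in one place. dlm_modes_compat() is the lock-mode compatibility check from lock.c, and the > DLM_LOCK_PR guard exists because PR and CW are not ordered relative to each other, so a numerically higher mode is not automatically more restrictive.

/* Hypothetical restatement of the suppression rules used above; not dlm code. */
static bool example_bast_is_redundant(const struct dlm_lkb *lkb, int bast_mode)
{
	/* rule 1: the blocking request is compatible with what this lock
	 * was last granted, so the holder has nothing to give up */
	if (lkb->lkb_last_cast &&
	    dlm_modes_compat(bast_mode, lkb->lkb_last_cast->mode))
		return true;

	/* rule 2: the previous queued callback is already a bast for the
	 * same mode or a strictly more restrictive one */
	if (lkb->lkb_last_cb && (lkb->lkb_last_cb->flags & DLM_CB_BAST)) {
		int prev_mode = lkb->lkb_last_cb->mode;

		if (prev_mode == bast_mode ||
		    (prev_mode > bast_mode && prev_mode > DLM_LOCK_PR))
			return true;
	}

	return false;
}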
ast.c @ 27d3994ebb5c (old):

int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
			 struct dlm_callback *cb, int *resid)
{
	int i, rv;

	*resid = 0;

	if (!lkb->lkb_callbacks[0].seq) {
		rv = -ENOENT;
		goto out;
	}

	/* oldest undelivered cb is callbacks[0] */

	memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
	memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));

	/* shift others down */

	for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
		if (!lkb->lkb_callbacks[i].seq)
			break;
		memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
		       sizeof(struct dlm_callback));
		memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
		(*resid)++;
	}

	/* if cb is a bast, it should be skipped if the blocking mode is
	   compatible with the last granted mode */

	if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
		if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
			cb->flags |= DLM_CB_SKIP;

			log_debug(ls, "skip %x bast %llu mode %d "
				  "for cast %llu mode %d",
				  lkb->lkb_id,
				  (unsigned long long)cb->seq,
				  cb->mode,
				  (unsigned long long)lkb->lkb_last_cast.seq,
				  lkb->lkb_last_cast.mode);
			rv = 0;
			goto out;
		}
	}

	if (cb->flags & DLM_CB_CAST)
		memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));

	if (cb->flags & DLM_CB_BAST)
		memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
	rv = 0;
 out:
	return rv;
}

ast.c @ 61bed0baa4db (new):

int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* oldest undelivered cb is callbacks first entry */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from callbacks so shift others down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}
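The DLM_ENQUEUE_CALLBACK_* and DLM_DEQUEUE_CALLBACK_* codes used above come from ast.h, which is not part of this excerpt. Judging only from how this file tests them, they are plain status values along the following lines; the names are taken from the code above, but the concrete values are assumptions:

/* Assumed definitions, reconstructed from usage in this file only. */
#define DLM_ENQUEUE_CALLBACK_SUCCESS	0	/* queued (or redundant bast dropped) */
#define DLM_ENQUEUE_CALLBACK_NEED_SCHED	1	/* first pending cb: caller must schedule work */
#define DLM_ENQUEUE_CALLBACK_FAILURE	-1	/* dlm_allocate_cb() failed */

#define DLM_DEQUEUE_CALLBACK_EMPTY	0	/* no callback queued */
#define DLM_DEQUEUE_CALLBACK_LAST	1	/* returned the final queued callback */
#define DLM_DEQUEUE_CALLBACK_SUCCESS	2	/* returned a callback, more remain */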
ast.c @ 27d3994ebb5c (old):

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t new_seq, prev_seq;
	int rv;

	spin_lock(&dlm_cb_seq_spin);
	new_seq = ++dlm_cb_seq;
	if (!dlm_cb_seq)
		new_seq = ++dlm_cb_seq;
	spin_unlock(&dlm_cb_seq_spin);

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	prev_seq = lkb->lkb_callbacks[0].seq;

	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
	if (rv < 0)
		goto out;

	if (!prev_seq) {
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
	}
 out:
	spin_unlock(&lkb->lkb_cb_lock);
}

ast.c @ 61bed0baa4db (new):

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}
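dlm_add_cb() is the entry point the locking core uses to deliver completion (cast) and blocking (bast) notifications: for in-kernel locks it queues the callback and schedules lkb_cb_work, for user-space locks it forwards to dlm_user_add_ast(). A hypothetical caller, shown only to make the parameters concrete (the real call sites live in lock.c and are not part of this diff):

/* Hypothetical call sites; parameter choices mirror the cast/bast split above. */
static void example_notify(struct dlm_lkb *lkb, int granted_mode,
			   int conflicting_mode)
{
	/* completion ast: report the granted mode with status 0 */
	dlm_add_cb(lkb, DLM_CB_CAST, granted_mode, 0, 0);

	/* blocking ast: ask the holder to release for a conflicting request */
	dlm_add_cb(lkb, DLM_CB_BAST, conflicting_mode, 0, 0);
}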
ast.c @ 27d3994ebb5c (old):

void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
	int i, rv, resid;

	memset(&callbacks, 0, sizeof(callbacks));

	spin_lock(&lkb->lkb_cb_lock);
	if (!lkb->lkb_callbacks[0].seq) {
		/* no callback work exists, shouldn't happen */
		log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
		if (rv < 0)
			break;
	}

	if (resid) {
		/* cbs remain, loop should have removed all, shouldn't happen */
		log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
			  resid);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}
	spin_unlock(&lkb->lkb_cb_lock);

	castfn = lkb->lkb_astfn;
	bastfn = lkb->lkb_bastfn;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (!callbacks[i].seq)
			break;
		if (callbacks[i].flags & DLM_CB_SKIP) {
			continue;
		} else if (callbacks[i].flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, callbacks[i].mode);
			lkb->lkb_last_bast_time = ktime_get();
			bastfn(lkb->lkb_astparam, callbacks[i].mode);
		} else if (callbacks[i].flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
			lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}
	}

	/* undo kref_get from dlm_add_callback, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

int dlm_callback_start(struct dlm_ls *ls)
{

--- 62 unchanged lines hidden ---

ast.c @ 61bed0baa4db (new):

void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	if (WARN_ON(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		return;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			lkb->lkb_flags &= ~DLM_IFL_NEED_SCHED;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

	/* undo kref_get from dlm_add_callback, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

int dlm_callback_start(struct dlm_ls *ls)
{

--- 62 unchanged lines hidden ---
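The new dlm_callback_work() above follows a standard producer/consumer drain shape: pop one entry under lkb_cb_lock, deliver it with the lock dropped, and only clear DLM_IFL_NEED_SCHED once the list is observed empty under the lock, so a racing dlm_add_cb() either sees the flag still set or schedules fresh work. A generic, self-contained sketch of that pattern with hypothetical types (not dlm code):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical queue mirroring the lkb_callbacks / DLM_IFL_NEED_SCHED pair. */
struct example_queue {
	spinlock_t lock;
	struct list_head items;
	bool need_sched;		/* set by the producer when it queues work */
};

struct example_item {
	struct list_head list;
};

static void example_drain(struct example_queue *q,
			  void (*deliver)(struct example_item *item))
{
	struct example_item *item;

	for (;;) {
		spin_lock(&q->lock);
		item = list_first_entry_or_null(&q->items, struct example_item,
						list);
		if (!item) {
			/* empty while holding the lock: safe to let the
			 * producer schedule the worker again */
			q->need_sched = false;
			spin_unlock(&q->lock);
			break;
		}
		list_del(&item->list);
		spin_unlock(&q->lock);

		deliver(item);		/* callbacks run without the lock held */
		kfree(item);
	}
}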