// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

static uint64_t dlm_cb_seq;
static DEFINE_SPINLOCK(dlm_cb_seq_spin);

static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
	int i;

	log_print("last_bast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_bast.seq,
		  lkb->lkb_last_bast.flags,
		  lkb->lkb_last_bast.mode,
		  lkb->lkb_last_bast.sb_status,
		  lkb->lkb_last_bast.sb_flags);

	log_print("last_cast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_cast.seq,
		  lkb->lkb_last_cast.flags,
		  lkb->lkb_last_cast.mode,
		  lkb->lkb_last_cast.sb_status,
		  lkb->lkb_last_cast.sb_flags);

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		log_print("cb %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id,
			  (unsigned long long)lkb->lkb_callbacks[i].seq,
			  lkb->lkb_callbacks[i].flags,
			  lkb->lkb_callbacks[i].mode,
			  lkb->lkb_callbacks[i].sb_status,
			  lkb->lkb_callbacks[i].sb_flags);
	}
}

int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			 int status, uint32_t sbflags, uint64_t seq)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t prev_seq;
	int prev_mode;
	int i, rv;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (lkb->lkb_callbacks[i].seq)
			continue;

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */

		if ((i > 0) && (flags & DLM_CB_BAST) &&
		    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

			prev_seq = lkb->lkb_callbacks[i-1].seq;
			prev_mode = lkb->lkb_callbacks[i-1].mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

				log_debug(ls, "skip %x add bast %llu mode %d "
					  "for bast %llu mode %d",
					  lkb->lkb_id,
					  (unsigned long long)seq,
					  mode,
					  (unsigned long long)prev_seq,
					  prev_mode);
				rv = 0;
				goto out;
			}
		}

		lkb->lkb_callbacks[i].seq = seq;
		lkb->lkb_callbacks[i].flags = flags;
		lkb->lkb_callbacks[i].mode = mode;
		lkb->lkb_callbacks[i].sb_status = status;
		lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
		rv = 0;
		break;
	}

	if (i == DLM_CALLBACKS_SIZE) {
		log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id, (unsigned long long)seq,
			  flags, mode, status, sbflags);
		dlm_dump_lkb_callbacks(lkb);
		rv = -1;
		goto out;
	}
 out:
	return rv;
}

int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
			 struct dlm_callback *cb, int *resid)
{
	int i, rv;

	*resid = 0;

	if (!lkb->lkb_callbacks[0].seq) {
		rv = -ENOENT;
		goto out;
	}

	/* oldest undelivered cb is callbacks[0] */

	memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
	memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));

	/* shift others down */

	for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
		if (!lkb->lkb_callbacks[i].seq)
			break;
		memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
		       sizeof(struct dlm_callback));
		memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
		(*resid)++;
	}

	/* if cb is a bast, it should be skipped if the blocking mode is
	   compatible with the last granted mode */

	if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
		if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
			cb->flags |= DLM_CB_SKIP;

			log_debug(ls, "skip %x bast %llu mode %d "
				  "for cast %llu mode %d",
				  lkb->lkb_id,
				  (unsigned long long)cb->seq,
				  cb->mode,
				  (unsigned long long)lkb->lkb_last_cast.seq,
				  lkb->lkb_last_cast.mode);
			rv = 0;
			goto out;
		}
	}

	if (cb->flags & DLM_CB_CAST) {
		memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
		lkb->lkb_last_cast_time = ktime_get();
	}

	if (cb->flags & DLM_CB_BAST) {
		memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
		lkb->lkb_last_bast_time = ktime_get();
	}
	rv = 0;
 out:
	return rv;
}

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t new_seq, prev_seq;
	int rv;

	spin_lock(&dlm_cb_seq_spin);
	new_seq = ++dlm_cb_seq;
	if (!dlm_cb_seq)
		new_seq = ++dlm_cb_seq;
	spin_unlock(&dlm_cb_seq_spin);

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
		return;
	}

	mutex_lock(&lkb->lkb_cb_mutex);
	prev_seq = lkb->lkb_callbacks[0].seq;

	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
	if (rv < 0)
		goto out;

	if (!prev_seq) {
		kref_get(&lkb->lkb_ref);

		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			mutex_lock(&ls->ls_cb_mutex);
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
			mutex_unlock(&ls->ls_cb_mutex);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
	}
 out:
	mutex_unlock(&lkb->lkb_cb_mutex);
}

void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
	int i, rv, resid;

	memset(&callbacks, 0, sizeof(callbacks));

	mutex_lock(&lkb->lkb_cb_mutex);
	if (!lkb->lkb_callbacks[0].seq) {
		/* no callback work exists, shouldn't happen */
		log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
		if (rv < 0)
			break;
	}

	if (resid) {
		/* cbs remain, loop should have removed all, shouldn't happen */
		log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
			  resid);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}
	mutex_unlock(&lkb->lkb_cb_mutex);

	castfn = lkb->lkb_astfn;
	bastfn = lkb->lkb_bastfn;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (!callbacks[i].seq)
			break;
		if (callbacks[i].flags & DLM_CB_SKIP) {
			continue;
		} else if (callbacks[i].flags & DLM_CB_BAST) {
			bastfn(lkb->lkb_astparam, callbacks[i].mode);
			trace_dlm_bast(ls, lkb, callbacks[i].mode);
		} else if (callbacks[i].flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
			lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
			castfn(lkb->lkb_astparam);
			trace_dlm_ast(ls, lkb, lkb->lkb_lksb);
		}
	}

	/* undo kref_get from dlm_add_callback, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

void dlm_callback_suspend(struct dlm_ls *ls)
{
	set_bit(LSFL_CB_DELAY, &ls->ls_flags);

	if (ls->ls_callback_wq)
		flush_workqueue(ls->ls_callback_wq);
}

#define MAX_CB_QUEUE 25

void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	clear_bit(LSFL_CB_DELAY, &ls->ls_flags);

	if (!ls->ls_callback_wq)
		return;

more:
	mutex_lock(&ls->ls_cb_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	mutex_unlock(&ls->ls_cb_mutex);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}
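
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * "#if 0"): one way a kernel DLM user might register the castfn/bastfn
 * callbacks that dlm_callback_work() above invokes.  The lockspace handle,
 * the resource name "example-res" and all example_* names are hypothetical,
 * and error handling/unlock are omitted; the dlm_lock() call itself uses the
 * in-kernel DLM API from <linux/dlm.h>.
 */
#if 0
#include <linux/dlm.h>
#include <linux/completion.h>
#include <linux/printk.h>

struct example_lock {
	struct dlm_lksb lksb;		/* sb_status/sb_flags are filled in
					   by dlm_callback_work() before the
					   ast runs */
	struct completion granted;
};

/* completion ast: delivered from the dlm_callback workqueue via castfn() */
static void example_ast(void *arg)
{
	struct example_lock *el = arg;

	complete(&el->granted);
}

/* blocking ast: delivered via bastfn() with the mode another node wants */
static void example_bast(void *arg, int mode)
{
	struct example_lock *el = arg;

	/* a real user would queue work to convert or unlock
	   el->lksb.sb_lkid; blocking here is not allowed */
	pr_debug("blocking ast for lkid %x mode %d\n",
		 el->lksb.sb_lkid, mode);
}

static int example_acquire(dlm_lockspace_t *lockspace,
			   struct example_lock *el)
{
	int error;

	init_completion(&el->granted);

	/* the request is asynchronous; the result arrives in example_ast() */
	error = dlm_lock(lockspace, DLM_LOCK_EX, &el->lksb, 0,
			 "example-res", sizeof("example-res") - 1, 0,
			 example_ast, el, example_bast);
	if (error)
		return error;

	wait_for_completion(&el->granted);
	return el->lksb.sb_status;
}
#endif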