// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lvb_table.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}

void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}

int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int copy_lvb = 0;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	} else if (flags & DLM_CB_CAST) {
		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
			/* previous granted mode decides whether the lvb must be
			 * copied to the user; the guard and the dereference
			 * must both use the last cast
			 */
			if (lkb->lkb_last_cast)
				prev_mode = lkb->lkb_last_cast->mode;
			else
				prev_mode = -1;

			if (!status && lkb->lkb_lksb->sb_lvbptr &&
			    dlm_lvb_operations[prev_mode + 1][mode + 1])
				copy_lvb = 1;
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	cb->copy_lvb = copy_lvb;
	kref_init(&cb->ref);
	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;

	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

out:
	return rv;
}

int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* oldest undelivered cb is callbacks first entry */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from callbacks so shift others down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}

/* work function: deliver all queued callbacks for one lkb */
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) {
		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
		spin_unlock(&lkb->lkb_cb_lock);
		goto out;
	}
	spin_unlock(&lkb->lkb_cb_lock);

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
}

#define MAX_CB_QUEUE 25

/* after recovery, requeue delayed callback work in batches of MAX_CB_QUEUE */
void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!ls->ls_callback_wq)
		return;

more:
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}