// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}

void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}
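/*
 * Reference handling note: dlm_callback_set_last_ptr() moves ownership,
 * dropping one kref on the old target and taking one on the new, so
 * *from always holds exactly one reference (or none when NULL). Passing
 * a NULL @to both releases the old callback and clears the pointer; a
 * teardown path could use it as in this illustrative sketch:
 *
 *	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
 *	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
 */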
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;

	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

out:
	return rv;
}

int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* the oldest undelivered cb is the first entry on the callbacks list */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from the callbacks list so the others shift down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}
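/*
 * Note on the DLM_IFL_CB_PENDING_BIT handshake: the first callback queued
 * on an idle lkb makes dlm_enqueue_lkb_callback() return
 * DLM_ENQUEUE_CALLBACK_NEED_SCHED, so dlm_add_cb() above takes one lkb
 * reference and schedules (or, under LSFL_CB_DELAY, parks) exactly one
 * work item; callbacks arriving while the bit is set only append to
 * lkb_callbacks. dlm_callback_work() below clears the bit under
 * lkb_cb_lock once the list drains, re-arming the handshake.
 */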
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) {
		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
		spin_unlock(&lkb->lkb_cb_lock);
		goto out;
	}
	spin_unlock(&lkb->lkb_cb_lock);

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo the kref_get from dlm_add_cb(); may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
}

#define MAX_CB_QUEUE 25

void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!ls->ls_callback_wq)
		return;

more:
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}
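/*
 * Illustrative recovery sequence (a sketch of how the suspend/resume pair
 * above is meant to be used, not code in this file): callbacks are parked
 * on ls_cb_delay for the duration of recovery and then requeued in
 * MAX_CB_QUEUE-sized batches, with cond_resched() between batches:
 *
 *	dlm_callback_suspend(ls);
 *	... recover lockspace state ...
 *	dlm_callback_resume(ls);
 */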