// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete. This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
	int length = ms->m_header.h_length - sizeof(struct dlm_message);

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

	/* remember which recovery generation this message arrived under */
	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, ms->m_header.h_length);

	atomic_inc(&ls->ls_requestqueue_cnt);
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening. Normal locking has been enabled before this is called. dlm_recv,
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got. If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that
 * case, we don't abort since locking_stopped is still 0. If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  ms->m_type, ms->m_header.h_nodeid,
			  ms->m_lkid, ms->m_remid, ms->m_result,
			  e->recover_seq);

		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
			wake_up(&ls->ls_requestqueue_wait);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv. At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests. We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	wait_event(ls->ls_requestqueue_wait,
		   atomic_read(&ls->ls_requestqueue_cnt) == 0);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!atomic_read(&ls->ls_count))
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
				wake_up(&ls->ls_requestqueue_wait);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}
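
/*
 * Illustrative sketch (not part of the original file): the calling pattern
 * described in the comments above, shown as a minimal example. dlm_recoverd
 * drains the messages saved during recovery, and the receive path waits for
 * that drain before dispatching a newly arrived message. The two wrapper
 * functions below are hypothetical and exist only to show the ordering; only
 * dlm_process_requestqueue() and dlm_wait_requestqueue() come from this file.
 */

/* hypothetical: called from the recovery daemon after locking is re-enabled */
static int __maybe_unused example_recoverd_drain(struct dlm_ls *ls)
{
	int error;

	/* replay messages that dlm_add_requestqueue() saved during recovery */
	error = dlm_process_requestqueue(ls);
	if (error == -EINTR) {
		/* a new dlm_ls_stop() arrived; give up and let the next
		   recovery cycle deal with whatever is still queued */
		return error;
	}
	return 0;
}

/* hypothetical: called on the receive path before dispatching a new message */
static void __maybe_unused example_recv_wait(struct dlm_ls *ls)
{
	/* block until dlm_recoverd has drained all saved messages */
	dlm_wait_requestqueue(ls);
}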