/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

struct rq_entry {
	struct list_head list;
	int nodeid;
	char request[1];
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */
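/*
 * A saved request is a verbatim copy of the incoming message: the rq_entry
 * is allocated with the message bytes appended after the struct (request[1]
 * marks where the copy begins), and the code takes h_length to cover the
 * whole message, since the copy is later handed back to dlm_receive_message
 * as a complete message.
 */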
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
{
	struct rq_entry *e;
	int length = hd->h_length;

	if (dlm_is_removed(ls, nodeid))
		return;

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory\n");
		return;
	}

	e->nodeid = nodeid;
	memcpy(e->request, hd, length);

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_header *hd;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		hd = (struct dlm_header *) e->request;
		error = dlm_receive_message(hd, e->nodeid, 1);

		if (error == -EINTR) {
			/* entry is left on requestqueue */
			log_debug(ls, "process_requestqueue abort eintr");
			break;
		}

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recvd.  At
 * the same time, dlm_recvd will start receiving new requests from remote
 * nodes.  We want to delay dlm_recvd processing new requests until
 * dlm_recoverd has finished processing the old saved requests.
 */
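/*
 * dlm_wait_requestqueue implements that delay by polling: it repeatedly
 * drops the mutex and calls schedule() until the saved requests have been
 * drained, giving up early if locking is stopped again for another
 * recovery.  It returns with the mutex released in every case.
 */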
void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	for (;;) {
		mutex_lock(&ls->ls_requestqueue_mutex);
		if (list_empty(&ls->ls_requestqueue))
			break;
		if (dlm_locking_stopped(ls))
			break;
		mutex_unlock(&ls->ls_requestqueue_mutex);
		schedule();
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	/* with no directory, the master is likely to change as a part of
	   recovery; requests to/from the defunct master need to be purged */

	switch (type) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_CANCEL:
		/* we're no longer the master of this resource, the sender
		   will resend to the new master (see waiter_needs_recovery) */

		if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
			return 1;
		break;

	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_UNLOCK_REPLY:
	case DLM_MSG_CANCEL_REPLY:
	case DLM_MSG_GRANT:
		/* this reply is from the former master of the resource,
		   we'll resend to the new master if needed */

		if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
			return 1;
		break;
	}

	return 0;
}
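/*
 * Drop any saved requests that recovery makes stale: messages from nodes
 * that have been removed, all directory messages (the directory is rebuilt
 * and lookups resent), and, when running with no directory, messages tied
 * to a resource master that recovery is likely to reassign.  The per-message
 * decision is made by purge_request() above.
 */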
void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = (struct dlm_message *) e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}