// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

/* Check if the task is idle, i.e. not running, not scheduled in
 * the tasklet queue and not draining. If so, reserve a slot in
 * do_task() by setting state to busy and taking a qp reference
 * to cover the gap from now until the task finishes.
 * state will move out of busy if the task returns a non-zero value
 * in do_task(). If state is already busy it is raised to armed
 * to indicate to do_task that an additional pass should be made
 * over the task.
 * Context: caller should hold task->lock.
 * Returns: true if state transitioned from idle to busy, else false.
 */
static bool __reserve_if_idle(struct rxe_task *task)
{
	WARN_ON(rxe_read(task->qp) <= 0);

	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
		return false;

	if (task->state == TASK_STATE_IDLE) {
		rxe_get(task->qp);
		task->state = TASK_STATE_BUSY;
		task->num_sched++;
		return true;
	}

	if (task->state == TASK_STATE_BUSY)
		task->state = TASK_STATE_ARMED;

	return false;
}

/* Check if the task is idle or drained and not currently
 * scheduled in the tasklet queue. This routine is
 * called by rxe_cleanup_task or rxe_disable_task to
 * see if the queue is empty.
 * Context: caller should hold task->lock.
 * Returns: true if done, else false.
 */
static bool __is_done(struct rxe_task *task)
{
	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
		return false;

	if (task->state == TASK_STATE_IDLE ||
	    task->state == TASK_STATE_DRAINED) {
		return true;
	}

	return false;
}

/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
	unsigned long flags;
	int done;

	spin_lock_irqsave(&task->lock, flags);
	done = __is_done(task);
	spin_unlock_irqrestore(&task->lock, flags);

	return done;
}

/* do_task is a wrapper for the three tasks (requester,
 * completer, responder) and calls them in a loop until
 * they return a non-zero value. It is called either
 * directly by rxe_run_task or indirectly if rxe_sched_task
 * schedules the task. Both must call __reserve_if_idle to
 * move the task to busy before calling or scheduling it.
 * The task can also be moved to drained or invalid
 * by calls to rxe_cleanup_task or rxe_disable_task.
 * In that case tasks which get here are not executed but
 * just flushed. The tasks are designed to look to see if
 * there is work to do and do part of it before returning
 * here with a return value of zero until all the work
 * has been consumed, then they return a non-zero value.
 * The number of times the task can be run is limited by
 * max iterations so one task cannot hold the cpu forever.
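 *
 * A sketch of the state machine, summarizing the transitions
 * already coded in __reserve_if_idle and the switch below (the
 * drained path is driven by rxe_cleanup_task/rxe_disable_task):
 *
 *	IDLE  -> BUSY      reserved by rxe_run_task/rxe_sched_task
 *	BUSY  -> ARMED     another run request arrived while busy
 *	BUSY  -> IDLE      func returned non-zero, or iterations ran
 *			   out (the latter also reschedules the tasklet)
 *	ARMED -> BUSY      one more pass is made over func
 *	DRAINING -> DRAINED  func returned non-zero while draining
 *	DRAINING/DRAINED -> INVALID  set by rxe_cleanup_task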
 */
static void do_task(struct tasklet_struct *t)
{
	int cont;
	int ret;
	struct rxe_task *task = from_tasklet(task, t, tasklet);
	unsigned int iterations;
	unsigned long flags;
	int resched = 0;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state >= TASK_STATE_DRAINED) {
		rxe_put(task->qp);
		task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	do {
		iterations = RXE_MAX_ITERATIONS;
		cont = 0;

		do {
			ret = task->func(task->qp);
		} while (ret == 0 && iterations-- > 0);

		spin_lock_irqsave(&task->lock, flags);
		switch (task->state) {
		case TASK_STATE_BUSY:
			if (ret) {
				task->state = TASK_STATE_IDLE;
			} else {
				/* This can happen if the client
				 * can add work faster than the
				 * tasklet can finish it.
				 * Reschedule the tasklet and exit
				 * the loop to give up the cpu.
				 */
				task->state = TASK_STATE_IDLE;
				resched = 1;
			}
			break;

		/* someone tried to run the task since the last time we
		 * called func, so we will call it one more time regardless
		 * of the return value
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

		case TASK_STATE_DRAINING:
			if (ret)
				task->state = TASK_STATE_DRAINED;
			else
				cont = 1;
			break;

		default:
			WARN_ON(1);
			rxe_info_qp(task->qp, "unexpected task state = %d",
				    task->state);
		}

		if (!cont) {
			task->num_done++;
			if (WARN_ON(task->num_done != task->num_sched))
				rxe_err_qp(task->qp,
					   "%ld tasks scheduled, %ld tasks done",
					   task->num_sched, task->num_done);
		}
		spin_unlock_irqrestore(&task->lock, flags);
	} while (cont);

	task->ret = ret;

	if (resched)
		rxe_sched_task(task);

	rxe_put(task->qp);
}

int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
		  int (*func)(struct rxe_qp *))
{
	WARN_ON(rxe_read(qp) <= 0);

	task->qp = qp;
	task->func = func;

	tasklet_setup(&task->tasklet, do_task);

	task->state = TASK_STATE_IDLE;
	spin_lock_init(&task->lock);

	return 0;
}

/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
 * process context. The qp is already completed with no
 * remaining references. Once the queue is drained the
 * task is moved to invalid and returns. The qp cleanup
 * code then calls the task functions directly without
 * using the task struct to drain any late arriving packets
 * or work requests.
 */
void rxe_cleanup_task(struct rxe_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_INVALID;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	/* now the task cannot be scheduled or run; just wait
	 * for the previously scheduled tasks to finish.
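	 * cond_resched() is appropriate here since we are in
	 * process context; it yields the cpu between polls of
	 * is_done() instead of busy spinning.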
	 */
	while (!is_done(task))
		cond_resched();

	tasklet_kill(&task->tasklet);

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_INVALID;
	spin_unlock_irqrestore(&task->lock, flags);
}

/* run the task inline if it is currently idle.
 * we cannot call do_task while holding the lock.
 */
void rxe_run_task(struct rxe_task *task)
{
	unsigned long flags;
	int run;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	run = __reserve_if_idle(task);
	spin_unlock_irqrestore(&task->lock, flags);

	if (run)
		do_task(&task->tasklet);
}

/* schedule the task to run later as a tasklet.
 * the tasklet_schedule call can be made while
 * holding the lock.
 */
void rxe_sched_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (__reserve_if_idle(task))
		tasklet_schedule(&task->tasklet);
	spin_unlock_irqrestore(&task->lock, flags);
}

/* rxe_disable_task/rxe_enable_task are only called from
 * rxe_modify_qp in process context. The task is moved
 * to the drained state by do_task.
 */
void rxe_disable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_DRAINED;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	while (!is_done(task))
		cond_resched();

	tasklet_disable(&task->tasklet);
}

void rxe_enable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state == TASK_STATE_INVALID) {
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	task->state = TASK_STATE_IDLE;
	tasklet_enable(&task->tasklet);
	spin_unlock_irqrestore(&task->lock, flags);
}
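
/* Usage sketch (illustrative only, not part of the driver): a
 * minimal example of how a caller owning an embedded struct
 * rxe_task might drive it. my_work() and my_setup() are
 * hypothetical names; in rxe the real work functions are the
 * requester, completer and responder, wired up when the qp is
 * created.
 *
 *	static int my_work(struct rxe_qp *qp)
 *	{
 *		// consume one unit of work; return 0 while more
 *		// remains, non-zero once the queue is empty
 *		return -EAGAIN;
 *	}
 *
 *	static int my_setup(struct rxe_qp *qp)
 *	{
 *		int err;
 *
 *		err = rxe_init_task(&qp->req.task, qp, my_work);
 *		if (err)
 *			return err;
 *
 *		// defer to tasklet context, or use rxe_run_task()
 *		// to run inline when the task is idle
 *		rxe_sched_task(&qp->req.task);
 *		return 0;
 *	}
 *
 * Teardown runs in process context: rxe_disable_task()/
 * rxe_enable_task() bracket qp state changes, and
 * rxe_cleanup_task() is called when the qp is destroyed.
 */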