1 #ifndef _HFI1_IOWAIT_H 2 #define _HFI1_IOWAIT_H 3 /* 4 * Copyright(c) 2015 - 2018 Intel Corporation. 5 * 6 * This file is provided under a dual BSD/GPLv2 license. When using or 7 * redistributing this file, you may do so under either license. 8 * 9 * GPL LICENSE SUMMARY 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of version 2 of the GNU General Public License as 13 * published by the Free Software Foundation. 14 * 15 * This program is distributed in the hope that it will be useful, but 16 * WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * BSD LICENSE 21 * 22 * Redistribution and use in source and binary forms, with or without 23 * modification, are permitted provided that the following conditions 24 * are met: 25 * 26 * - Redistributions of source code must retain the above copyright 27 * notice, this list of conditions and the following disclaimer. 28 * - Redistributions in binary form must reproduce the above copyright 29 * notice, this list of conditions and the following disclaimer in 30 * the documentation and/or other materials provided with the 31 * distribution. 32 * - Neither the name of Intel Corporation nor the names of its 33 * contributors may be used to endorse or promote products derived 34 * from this software without specific prior written permission. 35 * 36 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 37 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 38 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 39 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "sdma_txreq.h"

/*
 * typedef (*restart_t)() - restart callback
 * @work: pointer to work structure
 */
typedef void (*restart_t)(struct work_struct *work);

/* flags for iowait_set_flag()/iowait_flag_set()/iowait_clear_flag() */
#define IOWAIT_PENDING_IB 0x0
#define IOWAIT_PENDING_TID 0x1

/*
 * A QP can have multiple Send Engines (SEs).
 *
 * The current use case is for supporting a TID RDMA
 * packet build/xmit mechanism independent from verbs.
 */
#define IOWAIT_SES 2
#define IOWAIT_IB_SE 0
#define IOWAIT_TID_SE 1

struct sdma_txreq;
struct sdma_engine;
/**
 * struct iowait_work - per-send-engine work state
 * @iowork: the work struct
 * @tx_head: list of prebuilt packets
 * @iow: the parent iowait structure
 *
 * This structure is the work item (process) specific
 * details associated with each of the two SEs of the
 * QP.
 *
 * The workstruct and the queued TXs are unique to each
 * SE.
 */
struct iowait;
struct iowait_work {
	struct work_struct iowork;
	struct list_head tx_head;
	struct iowait *iow;
};

/**
 * struct iowait - wait/progress state embedded in a QP or PQ
 * @list: used to add/insert into QP/PQ wait lists
 * @tx_head: overflow list of sdma_txreq's
 * @sleep: no space callback
 * @wakeup: space callback wakeup
 * @sdma_drained: sdma count drained
 * @lock: lock protected head of wait queue
 * @wait_dma: wait for sdma_busy == 0
 * @wait_pio: wait for pio_busy == 0
 * @sdma_busy: # of packets in flight
 * @pio_busy: # of pio packets in flight
 * @count: total number of descriptors in tx_head'ed list
 * @tx_limit: limit for overflow queuing
 * @tx_count: number of tx entry's in tx_head'ed list
 * @starved_cnt: number of times this entry was queued on a wait list
 *		 without having sent packets (see iowait_queue())
 * @flags: wait flags (one per QP)
 * @wait: SE array of iowait_work (one work item per send engine)
 *
 * This is to be embedded in user's state structure
 * (QP or PQ).
 *
 * The sleep and wakeup members are a
 * bit misnamed.   They do not strictly
 * speaking sleep or wake up, but they
 * are callbacks for the ULP to implement
 * whatever queuing/dequeuing of
 * the embedded iowait and its containing struct
 * when a resource shortage like SDMA ring space is seen.
 *
 * Both potentially have locks held
 * so sleeping is not allowed.
 *
 * The wait_dma member, together with the sdma_busy count, lets
 * iowait_sdma_drain() sleep until all in-flight SDMA packets
 * have completed.
 *
 * The lock field is used by waiters to record
 * the seqlock_t that guards the list head.
 * Waiters explicitly know that, but the destroy
 * code that unwaits QPs does not.
 */
struct iowait {
	struct list_head list;
	int (*sleep)(
		struct sdma_engine *sde,
		struct iowait_work *wait,
		struct sdma_txreq *tx,
		uint seq,
		bool pkts_sent
		);
	void (*wakeup)(struct iowait *wait, int reason);
	void (*sdma_drained)(struct iowait *wait);
	seqlock_t *lock;
	wait_queue_head_t wait_dma;
	wait_queue_head_t wait_pio;
	atomic_t sdma_busy;
	atomic_t pio_busy;
	u32 count;
	u32 tx_limit;
	u32 tx_count;
	u8 starved_cnt;
	unsigned long flags;
	struct iowait_work wait[IOWAIT_SES];
};

#define SDMA_AVAIL_REASON 0

/* manipulate the IOWAIT_PENDING_* flags */
void iowait_set_flag(struct iowait *wait, u32 flag);
bool iowait_flag_set(struct iowait *wait, u32 flag);
void iowait_clear_flag(struct iowait *wait, u32 flag);

void iowait_init(struct iowait *wait, u32 tx_limit,
		 void (*func)(struct work_struct *work),
		 void (*tidfunc)(struct work_struct *work),
		 int (*sleep)(struct sdma_engine *sde,
			      struct iowait_work *wait,
			      struct sdma_txreq *tx,
			      uint seq,
			      bool pkts_sent),
		 void (*wakeup)(struct iowait *wait, int reason),
		 void (*sdma_drained)(struct iowait *wait));

/**
 * iowait_schedule() - schedule the default send engine work
 * @wait: wait struct to schedule
 * @wq: workqueue for schedule
 * @cpu: cpu
 *
 * Return: true if the IB SE work item was newly queued, false
 * if it was already pending on a workqueue.
 */
static inline bool iowait_schedule(struct iowait *wait,
				   struct workqueue_struct *wq, int cpu)
{
	return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}

/**
 * iowait_sdma_drain() - wait for DMAs to drain
 *
 * @wait: iowait structure
 *
 * This will delay until the iowait sdmas have
 * completed.
195 */ 196 static inline void iowait_sdma_drain(struct iowait *wait) 197 { 198 wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy)); 199 } 200 201 /** 202 * iowait_sdma_pending() - return sdma pending count 203 * 204 * @wait: iowait structure 205 * 206 */ 207 static inline int iowait_sdma_pending(struct iowait *wait) 208 { 209 return atomic_read(&wait->sdma_busy); 210 } 211 212 /** 213 * iowait_sdma_inc - note sdma io pending 214 * @wait: iowait structure 215 */ 216 static inline void iowait_sdma_inc(struct iowait *wait) 217 { 218 atomic_inc(&wait->sdma_busy); 219 } 220 221 /** 222 * iowait_sdma_add - add count to pending 223 * @wait: iowait structure 224 */ 225 static inline void iowait_sdma_add(struct iowait *wait, int count) 226 { 227 atomic_add(count, &wait->sdma_busy); 228 } 229 230 /** 231 * iowait_sdma_dec - note sdma complete 232 * @wait: iowait structure 233 */ 234 static inline int iowait_sdma_dec(struct iowait *wait) 235 { 236 if (!wait) 237 return 0; 238 return atomic_dec_and_test(&wait->sdma_busy); 239 } 240 241 /** 242 * iowait_pio_drain() - wait for pios to drain 243 * 244 * @wait: iowait structure 245 * 246 * This will delay until the iowait pios have 247 * completed. 
248 */ 249 static inline void iowait_pio_drain(struct iowait *wait) 250 { 251 wait_event_timeout(wait->wait_pio, 252 !atomic_read(&wait->pio_busy), 253 HZ); 254 } 255 256 /** 257 * iowait_pio_pending() - return pio pending count 258 * 259 * @wait: iowait structure 260 * 261 */ 262 static inline int iowait_pio_pending(struct iowait *wait) 263 { 264 return atomic_read(&wait->pio_busy); 265 } 266 267 /** 268 * iowait_pio_inc - note pio pending 269 * @wait: iowait structure 270 */ 271 static inline void iowait_pio_inc(struct iowait *wait) 272 { 273 atomic_inc(&wait->pio_busy); 274 } 275 276 /** 277 * iowait_pio_dec - note pio complete 278 * @wait: iowait structure 279 */ 280 static inline int iowait_pio_dec(struct iowait *wait) 281 { 282 if (!wait) 283 return 0; 284 return atomic_dec_and_test(&wait->pio_busy); 285 } 286 287 /** 288 * iowait_drain_wakeup() - trigger iowait_drain() waiter 289 * 290 * @wait: iowait structure 291 * 292 * This will trigger any waiters. 293 */ 294 static inline void iowait_drain_wakeup(struct iowait *wait) 295 { 296 wake_up(&wait->wait_dma); 297 wake_up(&wait->wait_pio); 298 if (wait->sdma_drained) 299 wait->sdma_drained(wait); 300 } 301 302 /** 303 * iowait_get_txhead() - get packet off of iowait list 304 * 305 * @wait iowait_work struture 306 */ 307 static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait) 308 { 309 struct sdma_txreq *tx = NULL; 310 311 if (!list_empty(&wait->tx_head)) { 312 tx = list_first_entry( 313 &wait->tx_head, 314 struct sdma_txreq, 315 list); 316 list_del_init(&tx->list); 317 } 318 return tx; 319 } 320 321 static inline u16 iowait_get_desc(struct iowait_work *w) 322 { 323 u16 num_desc = 0; 324 struct sdma_txreq *tx = NULL; 325 326 if (!list_empty(&w->tx_head)) { 327 tx = list_first_entry(&w->tx_head, struct sdma_txreq, 328 list); 329 num_desc = tx->num_desc; 330 } 331 return num_desc; 332 } 333 334 static inline u32 iowait_get_all_desc(struct iowait *w) 335 { 336 u32 num_desc = 0; 337 338 
num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]); 339 num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]); 340 return num_desc; 341 } 342 343 /** 344 * iowait_queue - Put the iowait on a wait queue 345 * @pkts_sent: have some packets been sent before queuing? 346 * @w: the iowait struct 347 * @wait_head: the wait queue 348 * 349 * This function is called to insert an iowait struct into a 350 * wait queue after a resource (eg, sdma decriptor or pio 351 * buffer) is run out. 352 */ 353 static inline void iowait_queue(bool pkts_sent, struct iowait *w, 354 struct list_head *wait_head) 355 { 356 /* 357 * To play fair, insert the iowait at the tail of the wait queue if it 358 * has already sent some packets; Otherwise, put it at the head. 359 */ 360 if (pkts_sent) { 361 list_add_tail(&w->list, wait_head); 362 w->starved_cnt = 0; 363 } else { 364 list_add(&w->list, wait_head); 365 w->starved_cnt++; 366 } 367 } 368 369 /** 370 * iowait_starve_clear - clear the wait queue's starve count 371 * @pkts_sent: have some packets been sent? 372 * @w: the iowait struct 373 * 374 * This function is called to clear the starve count. If no 375 * packets have been sent, the starve count will not be cleared. 376 */ 377 static inline void iowait_starve_clear(bool pkts_sent, struct iowait *w) 378 { 379 if (pkts_sent) 380 w->starved_cnt = 0; 381 } 382 383 /** 384 * iowait_starve_find_max - Find the maximum of the starve count 385 * @w: the iowait struct 386 * @max: a variable containing the max starve count 387 * @idx: the index of the current iowait in an array 388 * @max_idx: a variable containing the array index for the 389 * iowait entry that has the max starve count 390 * 391 * This function is called to compare the starve count of a 392 * given iowait with the given max starve count. The max starve 393 * count and the index will be updated if the iowait's start 394 * count is larger. 
395 */ 396 static inline void iowait_starve_find_max(struct iowait *w, u8 *max, 397 uint idx, uint *max_idx) 398 { 399 if (w->starved_cnt > *max) { 400 *max = w->starved_cnt; 401 *max_idx = idx; 402 } 403 } 404 405 /** 406 * iowait_packet_queued() - determine if a packet is queued 407 * @wait: the iowait_work structure 408 */ 409 static inline bool iowait_packet_queued(struct iowait_work *wait) 410 { 411 return !list_empty(&wait->tx_head); 412 } 413 414 /** 415 * inc_wait_count - increment wait counts 416 * @w: the log work struct 417 * @n: the count 418 */ 419 static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n) 420 { 421 if (!w) 422 return; 423 w->iow->tx_count++; 424 w->iow->count += n; 425 } 426 427 /** 428 * iowait_get_tid_work - return iowait_work for tid SE 429 * @w: the iowait struct 430 */ 431 static inline struct iowait_work *iowait_get_tid_work(struct iowait *w) 432 { 433 return &w->wait[IOWAIT_TID_SE]; 434 } 435 436 /** 437 * iowait_get_ib_work - return iowait_work for ib SE 438 * @w: the iowait struct 439 */ 440 static inline struct iowait_work *iowait_get_ib_work(struct iowait *w) 441 { 442 return &w->wait[IOWAIT_IB_SE]; 443 } 444 445 /** 446 * iowait_ioww_to_iow - return iowait given iowait_work 447 * @w: the iowait_work struct 448 */ 449 static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w) 450 { 451 if (likely(w)) 452 return w->iow; 453 return NULL; 454 } 455 456 void iowait_cancel_work(struct iowait *w); 457 int iowait_set_work_flag(struct iowait_work *w); 458 459 #endif 460