/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "hfi.h"
#include "common.h"
#include "qp.h"
#include "sdma.h"
#include "iowait.h"
#include "trace.h"

/* must be a power of 2, >= 64 and <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before an interrupt");

#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for a SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))

/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT

static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down] = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle] = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze] = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
	[sdma_state_s99_running] = "s99_Running",
};

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running] = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[sdma_event_e60_hw_halted] = "e60_HwHalted",
	[sdma_event_e70_go_idle] = "e70_GoIdle",
	[sdma_event_e80_hw_freeze] = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen] = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
	[sdma_event_e85_link_down] = "e85_LinkDown",
	[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
#endif

static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s20_idle] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s50_hw_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s80_hw_freeze] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s82_freeze_sw_clean] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
		.go_s99_running_totrue = 1,
	},
};

#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if the occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
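 *
 * Note: the per-engine wait above polls with udelay() and can spin for
 * hundreds of microseconds per engine while occupancy drains, so this
 * is not a fast path.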
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}

static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}

static inline void complete_tx(struct sdma_engine *sde,
			       struct sdma_txreq *tx,
			       int res)
{
	/* protect against complete modifying */
	struct iowait *wait = tx->wait;
	callback_t complete = tx->complete;

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	trace_hfi1_sdma_out_sn(sde, tx->sn);
	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
		dd_dev_err(sde->dd, "expected %llu got %llu\n",
			   sde->head_sn, tx->sn);
	sde->head_sn++;
#endif
	__sdma_txclean(sde->dd, tx);
	if (complete)
		(*complete)(tx, res);
	if (wait && iowait_sdma_dec(wait))
		iowait_drain_wakeup(wait);
}

/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 *
 * This routine is called from two places
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
}

/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
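 *
 * Both the running check and the flush are done under head_lock, so
 * the engine state cannot change out from under the flush itself.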
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}

static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}

static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
							curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}

static void sdma_err_progress_check(struct timer_list *t)
{
	unsigned index;
	struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if an interrupt triggers and spins
		 * on the same lock on the same CPU.
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}

static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}

/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests.  The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/*
	 * Releasing this reference means the state machine has stopped.
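	 * sdma_finalput() waits on the paired completion, so dropping the
	 * last reference here is what allows sdma_exit() to return.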
	 */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running &&
	    next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}

/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * list.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * alloc tx's.
 *
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 that is at least 64 and at most
	 * 32768.  Otherwise return the default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}

/**
 * sdma_engine_get_vl() - return vl for a given sdma engine
 * @sde: sdma engine
 *
 * This function returns the vl mapped to a given engine, or an error if
 * the mapping can't be found.  The mapping fields are protected by RCU.
 */
int sdma_engine_get_vl(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	struct sdma_vl_map *m;
	u8 vl;

	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
		return -EINVAL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	vl = m->engine_to_vl[sde->this_idx];
	rcu_read_unlock();

	return vl;
}

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
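 *
 * Example (illustrative): with two engines mapped to a VL, e->mask is 1,
 * so even selectors pick e->sde[0] and odd selectors pick e->sde[1].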
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	/* NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return engine 0 below
	 */
	if (vl >= num_vls) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}

struct sdma_rht_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[0];
};

struct sdma_rht_node {
	unsigned long cpu_id;
	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
	struct rhash_head node;
};

#define NR_CPUS_HINT 192

static const struct rhashtable_params sdma_rht_params = {
	.nelem_hint = NR_CPUS_HINT,
	.head_offset = offsetof(struct sdma_rht_node, node),
	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
	.max_size = NR_CPUS,
	.min_size = 8,
	.automatic_shrinking = true,
};

/*
 * sdma_select_user_engine() - select sdma engine based on user setup
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an sdma engine for a user sdma request.
 * A user defined sdma engine affinity setting is honored when applicable,
 * otherwise the system default sdma engine mapping is used.  To ensure
 * correct ordering, the mapping from <selector, vl> to sde must remain
 * unchanged.
 */
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl)
{
	struct sdma_rht_node *rht_node;
	struct sdma_engine *sde = NULL;
	const struct cpumask *current_mask = &current->cpus_allowed;
	unsigned long cpu_id;

	/*
	 * To ensure the same sdma engine(s) are always selected, the
	 * process must be pinned to a single CPU.
	 */
	if (cpumask_weight(current_mask) != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
					  sdma_rht_params);

	if (rht_node && rht_node->map[vl]) {
		struct sdma_rht_map_elem *map = rht_node->map[vl];

		sde = map->sde[selector & map->mask];
	}
	rcu_read_unlock();

	if (sde)
		return sde;

out:
	return sdma_select_engine_vl(dd, selector, vl);
}

static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{
	int i;

	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
		map->sde[map->ctr + i] = map->sde[i];
}

static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
				 struct sdma_engine *sde)
{
	unsigned int i, pow;

	/* only need to check the first ctr entries for a match */
	for (i = 0; i < map->ctr; i++) {
		if (map->sde[i] == sde) {
			memmove(&map->sde[i], &map->sde[i + 1],
				(map->ctr - i - 1) * sizeof(map->sde[0]));
			map->ctr--;
			pow = roundup_pow_of_two(map->ctr ? : 1);
			map->mask = pow - 1;
			sdma_populate_sde_map(map);
			break;
		}
	}
}

/*
 * Prevents concurrent reads and writes of the sdma engine cpu_mask
 */
static DEFINE_MUTEX(process_to_sde_mutex);

ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count)
{
	struct hfi1_devdata *dd = sde->dd;
	cpumask_var_t mask, new_mask;
	unsigned long cpu;
	int ret, vl, sz;
	struct sdma_rht_node *rht_node;

	vl = sdma_engine_get_vl(sde);
	if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
		return -EINVAL;

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
	if (!ret) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out_free;

	if (!cpumask_subset(mask, cpu_online_mask)) {
		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out_free;
	}

	sz = sizeof(struct sdma_rht_map_elem) +
			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));

	mutex_lock(&process_to_sde_mutex);

	for_each_cpu(cpu, mask) {
		/* Check if we have this already mapped */
		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
			cpumask_set_cpu(cpu, new_mask);
			continue;
		}

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (!rht_node) {
			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
			if (!rht_node) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
			if (!rht_node->map[vl]) {
				kfree(rht_node);
				ret = -ENOMEM;
				goto out;
			}
			rht_node->cpu_id = cpu;
			rht_node->map[vl]->mask = 0;
			rht_node->map[vl]->ctr = 1;
			rht_node->map[vl]->sde[0] = sde;

			ret = rhashtable_insert_fast(dd->sdma_rht,
						     &rht_node->node,
						     sdma_rht_params);
			if (ret) {
				kfree(rht_node->map[vl]);
				kfree(rht_node);
				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
					   cpu);
				goto out;
			}

		} else {
			int ctr, pow;

			/* Add new user mappings */
			if (!rht_node->map[vl])
				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);

			if (!rht_node->map[vl]) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl]->ctr++;
			ctr = rht_node->map[vl]->ctr;
			rht_node->map[vl]->sde[ctr - 1] = sde;
			pow = roundup_pow_of_two(ctr);
			rht_node->map[vl]->mask = pow - 1;

			/* Populate the sde map table */
			sdma_populate_sde_map(rht_node->map[vl]);
		}
		cpumask_set_cpu(cpu, new_mask);
	}

	/* Clean up old mappings */
	for_each_cpu(cpu, cpu_online_mask) {
		struct sdma_rht_node *rht_node;

		/* Don't cleanup sdes that are set in the new mask */
		if (cpumask_test_cpu(cpu, mask))
			continue;

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (rht_node) {
			bool empty = true;
			int i;
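
			/*
			 * The node is freed only once every VL map on it
			 * has drained to ctr == 0; until then other
			 * engines may still reference it.
			 */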

			/* Remove mappings for old sde */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
				if (rht_node->map[i])
					sdma_cleanup_sde_map(rht_node->map[i],
							     sde);

			/* Free empty hash table entries */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
				if (!rht_node->map[i])
					continue;

				if (rht_node->map[i]->ctr) {
					empty = false;
					break;
				}
			}

			if (empty) {
				ret = rhashtable_remove_fast(dd->sdma_rht,
							     &rht_node->node,
							     sdma_rht_params);
				WARN_ON(ret);

				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
					kfree(rht_node->map[i]);

				kfree(rht_node);
			}
		}
	}

	cpumask_copy(&sde->cpu_mask, new_mask);
out:
	mutex_unlock(&process_to_sde_mutex);
out_free:
	free_cpumask_var(mask);
	free_cpumask_var(new_mask);
	return ret ? : strnlen(buf, PAGE_SIZE);
}

ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
{
	mutex_lock(&process_to_sde_mutex);
	if (cpumask_empty(&sde->cpu_mask))
		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
	else
		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
	mutex_unlock(&process_to_sde_mutex);
	return strnlen(buf, PAGE_SIZE);
}

static void sdma_rht_free(void *ptr, void *arg)
{
	struct sdma_rht_node *rht_node = ptr;
	int i;

	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
		kfree(rht_node->map[i]);

	kfree(rht_node);
}

/**
 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
 * @s: seq file
 * @dd: hfi1_devdata
 * @cpuid: cpu id
 *
 * This routine dumps the process to sde mappings per cpu
 */
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
				struct hfi1_devdata *dd,
				unsigned long cpuid)
{
	struct sdma_rht_node *rht_node;
	int i, j;

	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
					  sdma_rht_params);
	if (!rht_node)
		return;

	seq_printf(s, "cpu%3lu: ", cpuid);
	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
			continue;

		seq_printf(s, " vl%d: [", i);

		for (j = 0; j < rht_node->map[i]->ctr; j++) {
			if (!rht_node->map[i]->sde[j])
				continue;

			if (j > 0)
				seq_puts(s, ",");

			seq_printf(s, " sdma%2d",
				   rht_node->map[i]->sde[j]->this_idx);
		}
		seq_puts(s, " ]");
	}

	seq_puts(s, "\n");
}

/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading.  NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
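 *
 * For example (illustrative numbers): 16 engines across 3 VLs
 * auto-compute to 5, 5 and 6 engines per VL, with the extra engine
 * going to the last VL.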
1171 * 1172 * The auto algorithm computes the sde_per_vl and the number of extra 1173 * engines. Any extra engines are added from the last VL on down. 1174 * 1175 * rcu locking is used here to control access to the mapping fields. 1176 * 1177 * If either the num_vls or num_sdma are non-power of 2, the array sizes 1178 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded 1179 * up to the next highest power of 2 and the first entry is reused 1180 * in a round robin fashion. 1181 * 1182 * If an error occurs the map change is not done and the mapping is 1183 * not changed. 1184 * 1185 */ 1186 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) 1187 { 1188 int i, j; 1189 int extra, sde_per_vl; 1190 int engine = 0; 1191 u8 lvl_engines[OPA_MAX_VLS]; 1192 struct sdma_vl_map *oldmap, *newmap; 1193 1194 if (!(dd->flags & HFI1_HAS_SEND_DMA)) 1195 return 0; 1196 1197 if (!vl_engines) { 1198 /* truncate divide */ 1199 sde_per_vl = dd->num_sdma / num_vls; 1200 /* extras */ 1201 extra = dd->num_sdma % num_vls; 1202 vl_engines = lvl_engines; 1203 /* add extras from last vl down */ 1204 for (i = num_vls - 1; i >= 0; i--, extra--) 1205 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0); 1206 } 1207 /* build new map */ 1208 newmap = kzalloc( 1209 sizeof(struct sdma_vl_map) + 1210 roundup_pow_of_two(num_vls) * 1211 sizeof(struct sdma_map_elem *), 1212 GFP_KERNEL); 1213 if (!newmap) 1214 goto bail; 1215 newmap->actual_vls = num_vls; 1216 newmap->vls = roundup_pow_of_two(num_vls); 1217 newmap->mask = (1 << ilog2(newmap->vls)) - 1; 1218 /* initialize back-map */ 1219 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) 1220 newmap->engine_to_vl[i] = -1; 1221 for (i = 0; i < newmap->vls; i++) { 1222 /* save for wrap around */ 1223 int first_engine = engine; 1224 1225 if (i < newmap->actual_vls) { 1226 int sz = roundup_pow_of_two(vl_engines[i]); 1227 1228 /* only allocate once */ 1229 newmap->map[i] = kzalloc( 1230 sizeof(struct sdma_map_elem) + 1231 sz * sizeof(struct sdma_engine *), 1232 GFP_KERNEL); 1233 if (!newmap->map[i]) 1234 goto bail; 1235 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; 1236 /* assign engines */ 1237 for (j = 0; j < sz; j++) { 1238 newmap->map[i]->sde[j] = 1239 &dd->per_sdma[engine]; 1240 if (++engine >= first_engine + vl_engines[i]) 1241 /* wrap back to first engine */ 1242 engine = first_engine; 1243 } 1244 /* assign back-map */ 1245 for (j = 0; j < vl_engines[i]; j++) 1246 newmap->engine_to_vl[first_engine + j] = i; 1247 } else { 1248 /* just re-use entry without allocating */ 1249 newmap->map[i] = newmap->map[i % num_vls]; 1250 } 1251 engine = first_engine + vl_engines[i]; 1252 } 1253 /* newmap in hand, save old map */ 1254 spin_lock_irq(&dd->sde_map_lock); 1255 oldmap = rcu_dereference_protected(dd->sdma_map, 1256 lockdep_is_held(&dd->sde_map_lock)); 1257 1258 /* publish newmap */ 1259 rcu_assign_pointer(dd->sdma_map, newmap); 1260 1261 spin_unlock_irq(&dd->sde_map_lock); 1262 /* success, free any old map after grace period */ 1263 if (oldmap) 1264 call_rcu(&oldmap->list, sdma_map_rcu_callback); 1265 return 0; 1266 bail: 1267 /* free any partial allocation */ 1268 sdma_map_free(newmap); 1269 return -ENOMEM; 1270 } 1271 1272 /** 1273 * sdma_clean() Clean up allocated memory 1274 * @dd: struct hfi1_devdata 1275 * @num_engines: num sdma engines 1276 * 1277 * This routine can be called regardless of the success of 1278 * sdma_init() 1279 */ 1280 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) 1281 { 1282 size_t i; 1283 struct sdma_engine *sde; 
	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	sdma_map_free(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;

	if (dd->sdma_rht) {
		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
		kfree(dd->sdma_rht);
		dd->sdma_rht = NULL;
	}
}

/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * Initializes each sde and its csrs.
 * Interrupts are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	struct rhashtable *tmp_sdma_rht;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;
	int ret = -ENOMEM;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= dd->chip_sdma_engines &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
				    GFP_KERNEL, dd->node);
	if (!dd->per_sdma)
		return ret;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (idle_cnt)
		dd->default_desc1 =
			SDMA_DESC1_HEAD_TO_HOST_FLAG;
	else
		dd->default_desc1 =
			SDMA_DESC1_INT_REQ_FLAG;

	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
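		/*
		 * descq_cnt was validated to be a power of two, so the
		 * ring arithmetic below reduces to shift/mask operations.
		 */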
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;

		/* Create a mask specifically for each interrupt source */
		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
					   this_idx);
		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
						this_idx);
		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
					    this_idx);
		/* Create a combined mask to cover all 3 interrupt sources */
		sde->imask = sde->int_mask | sde->progress_mask |
			     sde->idle_mask;

		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		/* ensure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			     (unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;

		timer_setup(&sde->err_progress_check_timer,
			    sdma_err_progress_check, 0);

		sde->descq = dma_zalloc_coherent(
			&dd->pcidev->dev,
			descq_cnt * sizeof(u64[2]),
			&sde->descq_phys,
			GFP_KERNEL
		);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kvzalloc_node(sizeof(struct sdma_txreq *) * descq_cnt,
				      GFP_KERNEL, dd->node);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		dd->sdma_heads_size,
		&dd->sdma_heads_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		sizeof(u32),
		&dd->sdma_pad_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
	if (ret < 0)
		goto bail;

	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
	if (!tmp_sdma_rht) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
	if (ret < 0)
		goto bail;
	dd->sdma_rht = tmp_sdma_rht;

	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return ret;
}

/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}

/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}

/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.  Interrupts need to be working at this point.
 *
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}

/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
			++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				   sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
}

/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}

/*
 * return the mode as indicated by the first
 * descriptor in the tx.
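 *
 * __sdma_txclean() uses this mode to decide how many AHG descriptors
 * to skip when unmapping; see the skip computation there.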
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}

/**
 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void __sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}

static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*sde->head_dma) :
		(u16)read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 cnt;
		u16 swtail;
		u16 swhead;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			       (hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);

		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				   sde->this_idx,
				   use_dmahead ? "dma" : "kreg",
				   hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	uint i, n = 0, seq, max_idx = 0;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
	u8 max_starved_cnt = 0;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
					wait,
					nw,
					&sde->dmawait,
					list) {
				u16 num_desc = 0;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				/* Find the most starved wait member */
				iowait_starve_find_max(wait, &max_starved_cnt,
						       n, &max_idx);
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	/* Schedule the most starved one first */
	if (n)
		waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);

	for (i = 0; i < n; i++)
		if (i != max_idx)
			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory.  The head
	 * value read might not be fully up to date.  If there are pending
	 * descriptors and the SDMA idle interrupt fired, then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this
	 * invocation of sdma_make_progress(), which the idle_check_done
	 * flag ensures.
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		u16 swtail;

		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

/*
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sdma_desct_intr);
	if (status & sde->idle_mask)
		sde->idle_int_cnt++;
	else if (status & sde->progress_mask)
		sde->progress_int_cnt++;
	else if (status & sde->int_mask)
		sde->sdma_int_cnt++;
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}

/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		   sde->this_idx,
		   (unsigned long long)status,
		   sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			   "SDMA (%u) engine error: 0x%llx state %s\n",
			   sde->this_idx,
			   (unsigned long long)status,
			   sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		   sde->this_idx,
		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);

static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	write_sde_csr(sde, SD(LEN_GEN),
		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
	write_sde_csr(sde, SD(LEN_GEN),
		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
		      (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}
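
/*
 * Worked example for the writes above: the LEN_GEN length field is
 * programmed as descq_cnt / 64, so with the default sdma_descq_cnt of
 * 2048 the value written is 32. The second write additionally sets the
 * MSB of the generation count (the 4ULL term), completing the
 * clear-then-set sequence described in the comment.
 */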

static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	smp_wmb(); /* see get_txhead() */
	writeq(tail, sde->tail_csr);
}

/*
 * This is called when changing to state s10_hw_start_up_halt_wait as
 * a result of send buffer errors or send DMA descriptor errors.
 */
static void sdma_hw_start_up(struct sdma_engine *sde)
{
	u64 reg;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	*sde->head_dma = 0;

	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}

/*
 * set_sdma_integrity
 *
 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
 */
static void set_sdma_integrity(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;

	write_sde_csr(sde, SD(CHECK_ENABLE),
		      hfi1_pkt_base_sdma_integrity(dd));
}

static void init_sdma_regs(
	struct sdma_engine *sde,
	u32 credits,
	uint idle_cnt)
{
	u8 opval, opmask;
#ifdef CONFIG_SDMA_VERBOSITY
	struct hfi1_devdata *dd = sde->dd;

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
	write_sde_csr(sde, SD(DESC_CNT), 0);
	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
	write_sde_csr(sde, SD(MEMORY),
		      ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
		      ((u64)(credits * sde->this_idx) <<
		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
	set_sdma_integrity(sde);
	opmask = OPCODE_CHECK_MASK_DISABLED;
	opval = OPCODE_CHECK_VAL_DISABLED;
	write_sde_csr(sde, SD(CHECK_OPCODE),
		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}
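
/*
 * Illustrative note on the SD(MEMORY) write in init_sdma_regs() above:
 * each engine receives a disjoint window of send DMA memory, engine i
 * owning [credits * i, credits * (i + 1)). With credits == 64 (an
 * assumed value, not taken from this file), engine 2 would start at
 * index 128 with a count of 64.
 */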

#ifdef CONFIG_SDMA_VERBOSITY

#define sdma_dumpstate_helper0(reg) do { \
		csr = read_csr(sde->dd, reg); \
		dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
	} while (0)

#define sdma_dumpstate_helper(reg) do { \
		csr = read_sde_csr(sde, reg); \
		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
			   #reg, sde->this_idx, csr); \
	} while (0)

#define sdma_dumpstate_helper2(reg) do { \
		csr = read_csr(sde->dd, reg + (8 * i)); \
		dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
			   #reg, i, csr); \
	} while (0)

void sdma_dumpstate(struct sdma_engine *sde)
{
	u64 csr;
	unsigned i;

	sdma_dumpstate_helper(SD(CTRL));
	sdma_dumpstate_helper(SD(STATUS));
	sdma_dumpstate_helper0(SD(ERR_STATUS));
	sdma_dumpstate_helper0(SD(ERR_MASK));
	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
	sdma_dumpstate_helper(SD(ENG_ERR_MASK));

	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
		sdma_dumpstate_helper2(CCE_INT_STATUS);
		sdma_dumpstate_helper2(CCE_INT_MASK);
		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
	}

	sdma_dumpstate_helper(SD(TAIL));
	sdma_dumpstate_helper(SD(HEAD));
	sdma_dumpstate_helper(SD(PRIORITY_THLD));
	sdma_dumpstate_helper(SD(IDLE_CNT));
	sdma_dumpstate_helper(SD(RELOAD_CNT));
	sdma_dumpstate_helper(SD(DESC_CNT));
	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
	sdma_dumpstate_helper(SD(MEMORY));
	sdma_dumpstate_helper0(SD(ENGINES));
	sdma_dumpstate_helper0(SD(MEM_SIZE));
	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
	sdma_dumpstate_helper(SD(BASE_ADDR));
	sdma_dumpstate_helper(SD(LEN_GEN));
	sdma_dumpstate_helper(SD(HEAD_ADDR));
	sdma_dumpstate_helper(SD(CHECK_ENABLE));
	sdma_dumpstate_helper(SD(CHECK_VL));
	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
	sdma_dumpstate_helper(SD(CHECK_SLID));
	sdma_dumpstate_helper(SD(CHECK_OPCODE));
}
#endif

static void dump_sdma_state(struct sdma_engine *sde)
{
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;
	u16 head, tail, cnt;

	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	cnt = sdma_descq_freecnt(sde);

	dd_dev_err(sde->dd,
		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
		   sde->this_idx, head, tail, cnt,
		   !list_empty(&sde->flushlist));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		dd_dev_err(sde->dd,
			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		dd_dev_err(sde->dd,
			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
			   desc[0], desc[1]);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			dd_dev_err(sde->dd,
				   "\taidx: %u amode: %u alen: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_DWS_SMASK) >>
					SDMA_DESC1_HEADER_DWS_SHIFT));
		head++;
		head &= sde->sdma_mask;
	}
}
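
/*
 * Example of reading the dump above: a descriptor line whose flags field
 * is "--FL" is both the first (F) and last (L) descriptor of its packet,
 * with neither the interrupt-request (I) nor the head-to-host (H) bit
 * set.
 */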

#define SDE_FMT \
	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),
		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
		   (unsigned long long)le64_to_cpu(*sde->head_dma),
		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		   (unsigned long long)sde->last_status,
		   (unsigned long long)sde->ahg_bits,
		   sde->tx_tail,
		   sde->tx_head,
		   sde->descq_tail,
		   sde->descq_head,
		   !list_empty(&sde->flushlist),
		   sde->descq_full_count,
		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}

/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
		<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
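
/*
 * Worked example for add_gen() above (assuming sdma_shift is the log2 of
 * the descriptor queue size, set up elsewhere in this file): with the
 * default 2048-entry ring the generation is (descq_tail >> 11) & 3, so
 * it advances by one on each complete wrap of the ring and cycles
 * through 0..3.
 */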

/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * tail side of ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
					   tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}

/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *tx,
	bool pkts_sent)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && wait->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
2381 * 2382 * Return: 2383 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in 2384 * ring (wait == NULL) 2385 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2386 */ 2387 int sdma_send_txreq(struct sdma_engine *sde, 2388 struct iowait *wait, 2389 struct sdma_txreq *tx, 2390 bool pkts_sent) 2391 { 2392 int ret = 0; 2393 u16 tail; 2394 unsigned long flags; 2395 2396 /* user should have supplied entire packet */ 2397 if (unlikely(tx->tlen)) 2398 return -EINVAL; 2399 tx->wait = wait; 2400 spin_lock_irqsave(&sde->tail_lock, flags); 2401 retry: 2402 if (unlikely(!__sdma_running(sde))) 2403 goto unlock_noconn; 2404 if (unlikely(tx->num_desc > sde->desc_avail)) 2405 goto nodesc; 2406 tail = submit_tx(sde, tx); 2407 if (wait) 2408 iowait_sdma_inc(wait); 2409 sdma_update_tail(sde, tail); 2410 unlock: 2411 spin_unlock_irqrestore(&sde->tail_lock, flags); 2412 return ret; 2413 unlock_noconn: 2414 if (wait) 2415 iowait_sdma_inc(wait); 2416 tx->next_descq_idx = 0; 2417 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2418 tx->sn = sde->tail_sn++; 2419 trace_hfi1_sdma_in_sn(sde, tx->sn); 2420 #endif 2421 spin_lock(&sde->flushlist_lock); 2422 list_add_tail(&tx->list, &sde->flushlist); 2423 spin_unlock(&sde->flushlist_lock); 2424 if (wait) { 2425 wait->tx_count++; 2426 wait->count += tx->num_desc; 2427 } 2428 schedule_work(&sde->flush_worker); 2429 ret = -ECOMM; 2430 goto unlock; 2431 nodesc: 2432 ret = sdma_check_progress(sde, wait, tx, pkts_sent); 2433 if (ret == -EAGAIN) { 2434 ret = 0; 2435 goto retry; 2436 } 2437 sde->descq_full_count++; 2438 goto unlock; 2439 } 2440 2441 /** 2442 * sdma_send_txlist() - submit a list of tx req to ring 2443 * @sde: sdma engine to use 2444 * @wait: wait structure to use when full (may be NULL) 2445 * @tx_list: list of sdma_txreqs to submit 2446 * @count: pointer to a u32 which, after return will contain the total number of 2447 * sdma_txreqs removed from the tx_list. This will include sdma_txreqs 2448 * whose SDMA descriptors are submitted to the ring and the sdma_txreqs 2449 * which are added to SDMA engine flush list if the SDMA engine state is 2450 * not running. 2451 * 2452 * The call submits the list into the ring. 2453 * 2454 * If the iowait structure is non-NULL and not equal to the iowait list 2455 * the unprocessed part of the list will be appended to the list in wait. 2456 * 2457 * In all cases, the tx_list will be updated so the head of the tx_list is 2458 * the list of descriptors that have yet to be transmitted. 2459 * 2460 * The intent of this call is to provide a more efficient 2461 * way of submitting multiple packets to SDMA while holding the tail 2462 * side locking. 
2463 * 2464 * Return: 2465 * 0 - Success, 2466 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) 2467 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2468 */ 2469 int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, 2470 struct list_head *tx_list, u32 *count_out) 2471 { 2472 struct sdma_txreq *tx, *tx_next; 2473 int ret = 0; 2474 unsigned long flags; 2475 u16 tail = INVALID_TAIL; 2476 u32 submit_count = 0, flush_count = 0, total_count; 2477 2478 spin_lock_irqsave(&sde->tail_lock, flags); 2479 retry: 2480 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2481 tx->wait = wait; 2482 if (unlikely(!__sdma_running(sde))) 2483 goto unlock_noconn; 2484 if (unlikely(tx->num_desc > sde->desc_avail)) 2485 goto nodesc; 2486 if (unlikely(tx->tlen)) { 2487 ret = -EINVAL; 2488 goto update_tail; 2489 } 2490 list_del_init(&tx->list); 2491 tail = submit_tx(sde, tx); 2492 submit_count++; 2493 if (tail != INVALID_TAIL && 2494 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { 2495 sdma_update_tail(sde, tail); 2496 tail = INVALID_TAIL; 2497 } 2498 } 2499 update_tail: 2500 total_count = submit_count + flush_count; 2501 if (wait) { 2502 iowait_sdma_add(wait, total_count); 2503 iowait_starve_clear(submit_count > 0, wait); 2504 } 2505 if (tail != INVALID_TAIL) 2506 sdma_update_tail(sde, tail); 2507 spin_unlock_irqrestore(&sde->tail_lock, flags); 2508 *count_out = total_count; 2509 return ret; 2510 unlock_noconn: 2511 spin_lock(&sde->flushlist_lock); 2512 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2513 tx->wait = wait; 2514 list_del_init(&tx->list); 2515 tx->next_descq_idx = 0; 2516 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2517 tx->sn = sde->tail_sn++; 2518 trace_hfi1_sdma_in_sn(sde, tx->sn); 2519 #endif 2520 list_add_tail(&tx->list, &sde->flushlist); 2521 flush_count++; 2522 if (wait) { 2523 wait->tx_count++; 2524 wait->count += tx->num_desc; 2525 } 2526 } 2527 spin_unlock(&sde->flushlist_lock); 2528 schedule_work(&sde->flush_worker); 2529 ret = -ECOMM; 2530 goto update_tail; 2531 nodesc: 2532 ret = sdma_check_progress(sde, wait, tx, submit_count > 0); 2533 if (ret == -EAGAIN) { 2534 ret = 0; 2535 goto retry; 2536 } 2537 sde->descq_full_count++; 2538 goto update_tail; 2539 } 2540 2541 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) 2542 { 2543 unsigned long flags; 2544 2545 spin_lock_irqsave(&sde->tail_lock, flags); 2546 write_seqlock(&sde->head_lock); 2547 2548 __sdma_process_event(sde, event); 2549 2550 if (sde->state.current_state == sdma_state_s99_running) 2551 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 2552 2553 write_sequnlock(&sde->head_lock); 2554 spin_unlock_irqrestore(&sde->tail_lock, flags); 2555 } 2556 2557 static void __sdma_process_event(struct sdma_engine *sde, 2558 enum sdma_events event) 2559 { 2560 struct sdma_state *ss = &sde->state; 2561 int need_progress = 0; 2562 2563 /* CONFIG SDMA temporary */ 2564 #ifdef CONFIG_SDMA_VERBOSITY 2565 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, 2566 sdma_state_names[ss->current_state], 2567 sdma_event_names[event]); 2568 #endif 2569 2570 switch (ss->current_state) { 2571 case sdma_state_s00_hw_down: 2572 switch (event) { 2573 case sdma_event_e00_go_hw_down: 2574 break; 2575 case sdma_event_e30_go_running: 2576 /* 2577 * If down, but running requested (usually result 2578 * of link up, then we need to start up. 2579 * This can happen when hw down is requested while 2580 * bringing the link up with traffic active on 2581 * 7220, e.g. 
2582 */ 2583 ss->go_s99_running = 1; 2584 /* fall through -- and start dma engine */ 2585 case sdma_event_e10_go_hw_start: 2586 /* This reference means the state machine is started */ 2587 sdma_get(&sde->state); 2588 sdma_set_state(sde, 2589 sdma_state_s10_hw_start_up_halt_wait); 2590 break; 2591 case sdma_event_e15_hw_halt_done: 2592 break; 2593 case sdma_event_e25_hw_clean_up_done: 2594 break; 2595 case sdma_event_e40_sw_cleaned: 2596 sdma_sw_tear_down(sde); 2597 break; 2598 case sdma_event_e50_hw_cleaned: 2599 break; 2600 case sdma_event_e60_hw_halted: 2601 break; 2602 case sdma_event_e70_go_idle: 2603 break; 2604 case sdma_event_e80_hw_freeze: 2605 break; 2606 case sdma_event_e81_hw_frozen: 2607 break; 2608 case sdma_event_e82_hw_unfreeze: 2609 break; 2610 case sdma_event_e85_link_down: 2611 break; 2612 case sdma_event_e90_sw_halted: 2613 break; 2614 } 2615 break; 2616 2617 case sdma_state_s10_hw_start_up_halt_wait: 2618 switch (event) { 2619 case sdma_event_e00_go_hw_down: 2620 sdma_set_state(sde, sdma_state_s00_hw_down); 2621 sdma_sw_tear_down(sde); 2622 break; 2623 case sdma_event_e10_go_hw_start: 2624 break; 2625 case sdma_event_e15_hw_halt_done: 2626 sdma_set_state(sde, 2627 sdma_state_s15_hw_start_up_clean_wait); 2628 sdma_start_hw_clean_up(sde); 2629 break; 2630 case sdma_event_e25_hw_clean_up_done: 2631 break; 2632 case sdma_event_e30_go_running: 2633 ss->go_s99_running = 1; 2634 break; 2635 case sdma_event_e40_sw_cleaned: 2636 break; 2637 case sdma_event_e50_hw_cleaned: 2638 break; 2639 case sdma_event_e60_hw_halted: 2640 schedule_work(&sde->err_halt_worker); 2641 break; 2642 case sdma_event_e70_go_idle: 2643 ss->go_s99_running = 0; 2644 break; 2645 case sdma_event_e80_hw_freeze: 2646 break; 2647 case sdma_event_e81_hw_frozen: 2648 break; 2649 case sdma_event_e82_hw_unfreeze: 2650 break; 2651 case sdma_event_e85_link_down: 2652 break; 2653 case sdma_event_e90_sw_halted: 2654 break; 2655 } 2656 break; 2657 2658 case sdma_state_s15_hw_start_up_clean_wait: 2659 switch (event) { 2660 case sdma_event_e00_go_hw_down: 2661 sdma_set_state(sde, sdma_state_s00_hw_down); 2662 sdma_sw_tear_down(sde); 2663 break; 2664 case sdma_event_e10_go_hw_start: 2665 break; 2666 case sdma_event_e15_hw_halt_done: 2667 break; 2668 case sdma_event_e25_hw_clean_up_done: 2669 sdma_hw_start_up(sde); 2670 sdma_set_state(sde, ss->go_s99_running ? 
2671 sdma_state_s99_running : 2672 sdma_state_s20_idle); 2673 break; 2674 case sdma_event_e30_go_running: 2675 ss->go_s99_running = 1; 2676 break; 2677 case sdma_event_e40_sw_cleaned: 2678 break; 2679 case sdma_event_e50_hw_cleaned: 2680 break; 2681 case sdma_event_e60_hw_halted: 2682 break; 2683 case sdma_event_e70_go_idle: 2684 ss->go_s99_running = 0; 2685 break; 2686 case sdma_event_e80_hw_freeze: 2687 break; 2688 case sdma_event_e81_hw_frozen: 2689 break; 2690 case sdma_event_e82_hw_unfreeze: 2691 break; 2692 case sdma_event_e85_link_down: 2693 break; 2694 case sdma_event_e90_sw_halted: 2695 break; 2696 } 2697 break; 2698 2699 case sdma_state_s20_idle: 2700 switch (event) { 2701 case sdma_event_e00_go_hw_down: 2702 sdma_set_state(sde, sdma_state_s00_hw_down); 2703 sdma_sw_tear_down(sde); 2704 break; 2705 case sdma_event_e10_go_hw_start: 2706 break; 2707 case sdma_event_e15_hw_halt_done: 2708 break; 2709 case sdma_event_e25_hw_clean_up_done: 2710 break; 2711 case sdma_event_e30_go_running: 2712 sdma_set_state(sde, sdma_state_s99_running); 2713 ss->go_s99_running = 1; 2714 break; 2715 case sdma_event_e40_sw_cleaned: 2716 break; 2717 case sdma_event_e50_hw_cleaned: 2718 break; 2719 case sdma_event_e60_hw_halted: 2720 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 2721 schedule_work(&sde->err_halt_worker); 2722 break; 2723 case sdma_event_e70_go_idle: 2724 break; 2725 case sdma_event_e85_link_down: 2726 /* fall through */ 2727 case sdma_event_e80_hw_freeze: 2728 sdma_set_state(sde, sdma_state_s80_hw_freeze); 2729 atomic_dec(&sde->dd->sdma_unfreeze_count); 2730 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2731 break; 2732 case sdma_event_e81_hw_frozen: 2733 break; 2734 case sdma_event_e82_hw_unfreeze: 2735 break; 2736 case sdma_event_e90_sw_halted: 2737 break; 2738 } 2739 break; 2740 2741 case sdma_state_s30_sw_clean_up_wait: 2742 switch (event) { 2743 case sdma_event_e00_go_hw_down: 2744 sdma_set_state(sde, sdma_state_s00_hw_down); 2745 break; 2746 case sdma_event_e10_go_hw_start: 2747 break; 2748 case sdma_event_e15_hw_halt_done: 2749 break; 2750 case sdma_event_e25_hw_clean_up_done: 2751 break; 2752 case sdma_event_e30_go_running: 2753 ss->go_s99_running = 1; 2754 break; 2755 case sdma_event_e40_sw_cleaned: 2756 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); 2757 sdma_start_hw_clean_up(sde); 2758 break; 2759 case sdma_event_e50_hw_cleaned: 2760 break; 2761 case sdma_event_e60_hw_halted: 2762 break; 2763 case sdma_event_e70_go_idle: 2764 ss->go_s99_running = 0; 2765 break; 2766 case sdma_event_e80_hw_freeze: 2767 break; 2768 case sdma_event_e81_hw_frozen: 2769 break; 2770 case sdma_event_e82_hw_unfreeze: 2771 break; 2772 case sdma_event_e85_link_down: 2773 ss->go_s99_running = 0; 2774 break; 2775 case sdma_event_e90_sw_halted: 2776 break; 2777 } 2778 break; 2779 2780 case sdma_state_s40_hw_clean_up_wait: 2781 switch (event) { 2782 case sdma_event_e00_go_hw_down: 2783 sdma_set_state(sde, sdma_state_s00_hw_down); 2784 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2785 break; 2786 case sdma_event_e10_go_hw_start: 2787 break; 2788 case sdma_event_e15_hw_halt_done: 2789 break; 2790 case sdma_event_e25_hw_clean_up_done: 2791 sdma_hw_start_up(sde); 2792 sdma_set_state(sde, ss->go_s99_running ? 
2793 sdma_state_s99_running : 2794 sdma_state_s20_idle); 2795 break; 2796 case sdma_event_e30_go_running: 2797 ss->go_s99_running = 1; 2798 break; 2799 case sdma_event_e40_sw_cleaned: 2800 break; 2801 case sdma_event_e50_hw_cleaned: 2802 break; 2803 case sdma_event_e60_hw_halted: 2804 break; 2805 case sdma_event_e70_go_idle: 2806 ss->go_s99_running = 0; 2807 break; 2808 case sdma_event_e80_hw_freeze: 2809 break; 2810 case sdma_event_e81_hw_frozen: 2811 break; 2812 case sdma_event_e82_hw_unfreeze: 2813 break; 2814 case sdma_event_e85_link_down: 2815 ss->go_s99_running = 0; 2816 break; 2817 case sdma_event_e90_sw_halted: 2818 break; 2819 } 2820 break; 2821 2822 case sdma_state_s50_hw_halt_wait: 2823 switch (event) { 2824 case sdma_event_e00_go_hw_down: 2825 sdma_set_state(sde, sdma_state_s00_hw_down); 2826 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2827 break; 2828 case sdma_event_e10_go_hw_start: 2829 break; 2830 case sdma_event_e15_hw_halt_done: 2831 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2832 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2833 break; 2834 case sdma_event_e25_hw_clean_up_done: 2835 break; 2836 case sdma_event_e30_go_running: 2837 ss->go_s99_running = 1; 2838 break; 2839 case sdma_event_e40_sw_cleaned: 2840 break; 2841 case sdma_event_e50_hw_cleaned: 2842 break; 2843 case sdma_event_e60_hw_halted: 2844 schedule_work(&sde->err_halt_worker); 2845 break; 2846 case sdma_event_e70_go_idle: 2847 ss->go_s99_running = 0; 2848 break; 2849 case sdma_event_e80_hw_freeze: 2850 break; 2851 case sdma_event_e81_hw_frozen: 2852 break; 2853 case sdma_event_e82_hw_unfreeze: 2854 break; 2855 case sdma_event_e85_link_down: 2856 ss->go_s99_running = 0; 2857 break; 2858 case sdma_event_e90_sw_halted: 2859 break; 2860 } 2861 break; 2862 2863 case sdma_state_s60_idle_halt_wait: 2864 switch (event) { 2865 case sdma_event_e00_go_hw_down: 2866 sdma_set_state(sde, sdma_state_s00_hw_down); 2867 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2868 break; 2869 case sdma_event_e10_go_hw_start: 2870 break; 2871 case sdma_event_e15_hw_halt_done: 2872 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2873 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2874 break; 2875 case sdma_event_e25_hw_clean_up_done: 2876 break; 2877 case sdma_event_e30_go_running: 2878 ss->go_s99_running = 1; 2879 break; 2880 case sdma_event_e40_sw_cleaned: 2881 break; 2882 case sdma_event_e50_hw_cleaned: 2883 break; 2884 case sdma_event_e60_hw_halted: 2885 schedule_work(&sde->err_halt_worker); 2886 break; 2887 case sdma_event_e70_go_idle: 2888 ss->go_s99_running = 0; 2889 break; 2890 case sdma_event_e80_hw_freeze: 2891 break; 2892 case sdma_event_e81_hw_frozen: 2893 break; 2894 case sdma_event_e82_hw_unfreeze: 2895 break; 2896 case sdma_event_e85_link_down: 2897 break; 2898 case sdma_event_e90_sw_halted: 2899 break; 2900 } 2901 break; 2902 2903 case sdma_state_s80_hw_freeze: 2904 switch (event) { 2905 case sdma_event_e00_go_hw_down: 2906 sdma_set_state(sde, sdma_state_s00_hw_down); 2907 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2908 break; 2909 case sdma_event_e10_go_hw_start: 2910 break; 2911 case sdma_event_e15_hw_halt_done: 2912 break; 2913 case sdma_event_e25_hw_clean_up_done: 2914 break; 2915 case sdma_event_e30_go_running: 2916 ss->go_s99_running = 1; 2917 break; 2918 case sdma_event_e40_sw_cleaned: 2919 break; 2920 case sdma_event_e50_hw_cleaned: 2921 break; 2922 case sdma_event_e60_hw_halted: 2923 break; 2924 case sdma_event_e70_go_idle: 2925 ss->go_s99_running = 0; 2926 break; 2927 
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * A SW-initiated halt does not perform the engine
			 * progress check.
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
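
/*
 * Illustrative walk through the state machine above (read off the cases,
 * not additional behavior): a hardware halt while running proceeds
 *
 *	s99_running  -e60->  s50_hw_halt_wait
 *	             -e15->  s30_sw_clean_up_wait
 *	             -e40->  s40_hw_clean_up_wait
 *	             -e25->  s99_running (or s20_idle if a go-idle
 *	                     arrived in the meantime)
 */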

/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors. There doesn't seem to be
 * much point in an interim step. The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where an input packet has >MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely(tx->num_desc == (MAX_DESC - 1))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	__sdma_txclean(dd, tx);
	return -ENOMEM;
}
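
/*
 * Illustrative note: when a caller reaches MAX_DESC - 1 descriptors with
 * payload still remaining (tx->tlen != 0), the helper above switches the
 * request to coalescing: the remaining data is copied into coalesce_buf
 * by ext_coal_sdma_tx_descs() below and sent with a single final
 * descriptor, which is how a packet with more than MAX_DESC iovecs is
 * supported.
 */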

/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If there is an allocated coalesce buffer, it
 * copies the input packet data into it. It also adds the coalesce
 * buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		__sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			__sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}

/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}

/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
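
/*
 * Worked example for the pad length computed above: sizeof(u32) -
 * (tx->packet_len & (sizeof(u32) - 1)) maps a 61-byte packet to a
 * 3-byte pad descriptor pointing at the shared dd->sdma_pad_phys
 * buffer; callers only invoke this for a tx that is not dword sized,
 * per the comment above.
 */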

/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}

/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(READ_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}

/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry to
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
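
/*
 * Illustrative usage sketch for the two helpers above (not a call site
 * from this driver): claim an AHG entry, fall back to the non-AHG path
 * when none is available, and release the entry when done:
 *
 *	int ahg_idx = sdma_ahg_alloc(sde);
 *
 *	if (ahg_idx >= 0) {
 *		... build headers using AHG entry ahg_idx ...
 *		sdma_ahg_free(sde, ahg_idx);
 *	} else {
 *		... -EOPNOTSUPP or -ENOSPC: send full headers instead ...
 *	}
 */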
3396 */ 3397 void sdma_unfreeze(struct hfi1_devdata *dd) 3398 { 3399 int i; 3400 3401 /* tell all engines start freeze clean up */ 3402 for (i = 0; i < dd->num_sdma; i++) 3403 sdma_process_event(&dd->per_sdma[i], 3404 sdma_event_e82_hw_unfreeze); 3405 } 3406 3407 /** 3408 * _sdma_engine_progress_schedule() - schedule progress on engine 3409 * @sde: sdma_engine to schedule progress 3410 * 3411 */ 3412 void _sdma_engine_progress_schedule( 3413 struct sdma_engine *sde) 3414 { 3415 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); 3416 /* assume we have selected a good cpu */ 3417 write_csr(sde->dd, 3418 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), 3419 sde->progress_mask); 3420 } 3421