1 /* 2 * Copyright(c) 2015 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
45 * 46 */ 47 48 #include <linux/spinlock.h> 49 #include <linux/seqlock.h> 50 #include <linux/netdevice.h> 51 #include <linux/moduleparam.h> 52 #include <linux/bitops.h> 53 #include <linux/timer.h> 54 #include <linux/vmalloc.h> 55 #include <linux/highmem.h> 56 57 #include "hfi.h" 58 #include "common.h" 59 #include "qp.h" 60 #include "sdma.h" 61 #include "iowait.h" 62 #include "trace.h" 63 64 /* must be a power of 2 >= 64 <= 32768 */ 65 #define SDMA_DESCQ_CNT 2048 66 #define SDMA_DESC_INTR 64 67 #define INVALID_TAIL 0xffff 68 69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT; 70 module_param(sdma_descq_cnt, uint, S_IRUGO); 71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); 72 73 static uint sdma_idle_cnt = 250; 74 module_param(sdma_idle_cnt, uint, S_IRUGO); 75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)"); 76 77 uint mod_num_sdma; 78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO); 79 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use"); 80 81 static uint sdma_desct_intr = SDMA_DESC_INTR; 82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR); 83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt"); 84 85 #define SDMA_WAIT_BATCH_SIZE 20 86 /* max wait time for a SDMA engine to indicate it has halted */ 87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */ 88 /* all SDMA engine errors that cause a halt */ 89 90 #define SD(name) SEND_DMA_##name 91 #define ALL_SDMA_ENG_HALT_ERRS \ 92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \ 93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \ 94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \ 95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \ 96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \ 97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \ 98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \ 99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \ 100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \ 101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \ 102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \ 103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \ 104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \ 105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \ 106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \ 107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \ 108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \ 109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK)) 110 111 /* sdma_sendctrl operations */ 112 #define SDMA_SENDCTRL_OP_ENABLE BIT(0) 113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1) 114 #define SDMA_SENDCTRL_OP_HALT BIT(2) 115 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3) 116 117 /* handle long defines */ 118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \ 119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK 120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \ 121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT 122 123 static const char * const sdma_state_names[] = { 124 [sdma_state_s00_hw_down] = "s00_HwDown", 125 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait", 126 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait", 127 [sdma_state_s20_idle] = "s20_Idle", 128 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", 129 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", 130 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", 131 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait", 132 [sdma_state_s80_hw_freeze] = "s80_HwFreeze", 133 
[sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean", 134 [sdma_state_s99_running] = "s99_Running", 135 }; 136 137 #ifdef CONFIG_SDMA_VERBOSITY 138 static const char * const sdma_event_names[] = { 139 [sdma_event_e00_go_hw_down] = "e00_GoHwDown", 140 [sdma_event_e10_go_hw_start] = "e10_GoHwStart", 141 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone", 142 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone", 143 [sdma_event_e30_go_running] = "e30_GoRunning", 144 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned", 145 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned", 146 [sdma_event_e60_hw_halted] = "e60_HwHalted", 147 [sdma_event_e70_go_idle] = "e70_GoIdle", 148 [sdma_event_e80_hw_freeze] = "e80_HwFreeze", 149 [sdma_event_e81_hw_frozen] = "e81_HwFrozen", 150 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze", 151 [sdma_event_e85_link_down] = "e85_LinkDown", 152 [sdma_event_e90_sw_halted] = "e90_SwHalted", 153 }; 154 #endif 155 156 static const struct sdma_set_state_action sdma_action_table[] = { 157 [sdma_state_s00_hw_down] = { 158 .go_s99_running_tofalse = 1, 159 .op_enable = 0, 160 .op_intenable = 0, 161 .op_halt = 0, 162 .op_cleanup = 0, 163 }, 164 [sdma_state_s10_hw_start_up_halt_wait] = { 165 .op_enable = 0, 166 .op_intenable = 0, 167 .op_halt = 1, 168 .op_cleanup = 0, 169 }, 170 [sdma_state_s15_hw_start_up_clean_wait] = { 171 .op_enable = 0, 172 .op_intenable = 1, 173 .op_halt = 0, 174 .op_cleanup = 1, 175 }, 176 [sdma_state_s20_idle] = { 177 .op_enable = 0, 178 .op_intenable = 1, 179 .op_halt = 0, 180 .op_cleanup = 0, 181 }, 182 [sdma_state_s30_sw_clean_up_wait] = { 183 .op_enable = 0, 184 .op_intenable = 0, 185 .op_halt = 0, 186 .op_cleanup = 0, 187 }, 188 [sdma_state_s40_hw_clean_up_wait] = { 189 .op_enable = 0, 190 .op_intenable = 0, 191 .op_halt = 0, 192 .op_cleanup = 1, 193 }, 194 [sdma_state_s50_hw_halt_wait] = { 195 .op_enable = 0, 196 .op_intenable = 0, 197 .op_halt = 0, 198 .op_cleanup = 0, 199 }, 200 [sdma_state_s60_idle_halt_wait] = { 201 .go_s99_running_tofalse = 1, 202 .op_enable = 0, 203 .op_intenable = 0, 204 .op_halt = 1, 205 .op_cleanup = 0, 206 }, 207 [sdma_state_s80_hw_freeze] = { 208 .op_enable = 0, 209 .op_intenable = 0, 210 .op_halt = 0, 211 .op_cleanup = 0, 212 }, 213 [sdma_state_s82_freeze_sw_clean] = { 214 .op_enable = 0, 215 .op_intenable = 0, 216 .op_halt = 0, 217 .op_cleanup = 0, 218 }, 219 [sdma_state_s99_running] = { 220 .op_enable = 1, 221 .op_intenable = 1, 222 .op_halt = 0, 223 .op_cleanup = 0, 224 .go_s99_running_totrue = 1, 225 }, 226 }; 227 228 #define SDMA_TAIL_UPDATE_THRESH 0x1F 229 230 /* declare all statics here rather than keep sorting */ 231 static void sdma_complete(struct kref *); 232 static void sdma_finalput(struct sdma_state *); 233 static void sdma_get(struct sdma_state *); 234 static void sdma_hw_clean_up_task(unsigned long); 235 static void sdma_put(struct sdma_state *); 236 static void sdma_set_state(struct sdma_engine *, enum sdma_states); 237 static void sdma_start_hw_clean_up(struct sdma_engine *); 238 static void sdma_sw_clean_up_task(unsigned long); 239 static void sdma_sendctrl(struct sdma_engine *, unsigned); 240 static void init_sdma_regs(struct sdma_engine *, u32, uint); 241 static void sdma_process_event( 242 struct sdma_engine *sde, 243 enum sdma_events event); 244 static void __sdma_process_event( 245 struct sdma_engine *sde, 246 enum sdma_events event); 247 static void dump_sdma_state(struct sdma_engine *sde); 248 static void sdma_make_progress(struct sdma_engine *sde, u64 status); 249 static void sdma_desc_avail(struct 
sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
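 *
 * Each engine is drained by sdma_wait_for_packet_egress() above, which
 * polls the egress occupancy roughly once per microsecond, restarts its
 * progress counter whenever the occupancy changes, and bounces the link
 * if no progress is seen for about 500 polls.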
339 */ 340 void sdma_wait(struct hfi1_devdata *dd) 341 { 342 int i; 343 344 for (i = 0; i < dd->num_sdma; i++) { 345 struct sdma_engine *sde = &dd->per_sdma[i]; 346 347 sdma_wait_for_packet_egress(sde, 0); 348 } 349 } 350 351 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) 352 { 353 u64 reg; 354 355 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) 356 return; 357 reg = cnt; 358 reg &= SD(DESC_CNT_CNT_MASK); 359 reg <<= SD(DESC_CNT_CNT_SHIFT); 360 write_sde_csr(sde, SD(DESC_CNT), reg); 361 } 362 363 static inline void complete_tx(struct sdma_engine *sde, 364 struct sdma_txreq *tx, 365 int res) 366 { 367 /* protect against complete modifying */ 368 struct iowait *wait = tx->wait; 369 callback_t complete = tx->complete; 370 371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 372 trace_hfi1_sdma_out_sn(sde, tx->sn); 373 if (WARN_ON_ONCE(sde->head_sn != tx->sn)) 374 dd_dev_err(sde->dd, "expected %llu got %llu\n", 375 sde->head_sn, tx->sn); 376 sde->head_sn++; 377 #endif 378 __sdma_txclean(sde->dd, tx); 379 if (complete) 380 (*complete)(tx, res); 381 if (iowait_sdma_dec(wait)) 382 iowait_drain_wakeup(wait); 383 } 384 385 /* 386 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status 387 * 388 * Depending on timing there can be txreqs in two places: 389 * - in the descq ring 390 * - in the flush list 391 * 392 * To avoid ordering issues the descq ring needs to be flushed 393 * first followed by the flush list. 394 * 395 * This routine is called from two places 396 * - From a work queue item 397 * - Directly from the state machine just before setting the 398 * state to running 399 * 400 * Must be called with head_lock held 401 * 402 */ 403 static void sdma_flush(struct sdma_engine *sde) 404 { 405 struct sdma_txreq *txp, *txp_next; 406 LIST_HEAD(flushlist); 407 unsigned long flags; 408 409 /* flush from head to tail */ 410 sdma_flush_descq(sde); 411 spin_lock_irqsave(&sde->flushlist_lock, flags); 412 /* copy flush list */ 413 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) { 414 list_del_init(&txp->list); 415 list_add_tail(&txp->list, &flushlist); 416 } 417 spin_unlock_irqrestore(&sde->flushlist_lock, flags); 418 /* flush from flush list */ 419 list_for_each_entry_safe(txp, txp_next, &flushlist, list) 420 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); 421 } 422 423 /* 424 * Fields a work request for flushing the descq ring 425 * and the flush list 426 * 427 * If the engine has been brought to running during 428 * the scheduling delay, the flush is ignored, assuming 429 * that the process of bringing the engine to running 430 * would have done this flush prior to going to running. 
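 *
 * Note that the running check and the flush itself are performed under
 * the head_lock write side (see sdma_field_flush() below), so a
 * concurrent transition to running should not race with the flush.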
431 * 432 */ 433 static void sdma_field_flush(struct work_struct *work) 434 { 435 unsigned long flags; 436 struct sdma_engine *sde = 437 container_of(work, struct sdma_engine, flush_worker); 438 439 write_seqlock_irqsave(&sde->head_lock, flags); 440 if (!__sdma_running(sde)) 441 sdma_flush(sde); 442 write_sequnlock_irqrestore(&sde->head_lock, flags); 443 } 444 445 static void sdma_err_halt_wait(struct work_struct *work) 446 { 447 struct sdma_engine *sde = container_of(work, struct sdma_engine, 448 err_halt_worker); 449 u64 statuscsr; 450 unsigned long timeout; 451 452 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT); 453 while (1) { 454 statuscsr = read_sde_csr(sde, SD(STATUS)); 455 statuscsr &= SD(STATUS_ENG_HALTED_SMASK); 456 if (statuscsr) 457 break; 458 if (time_after(jiffies, timeout)) { 459 dd_dev_err(sde->dd, 460 "SDMA engine %d - timeout waiting for engine to halt\n", 461 sde->this_idx); 462 /* 463 * Continue anyway. This could happen if there was 464 * an uncorrectable error in the wrong spot. 465 */ 466 break; 467 } 468 usleep_range(80, 120); 469 } 470 471 sdma_process_event(sde, sdma_event_e15_hw_halt_done); 472 } 473 474 static void sdma_err_progress_check_schedule(struct sdma_engine *sde) 475 { 476 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { 477 unsigned index; 478 struct hfi1_devdata *dd = sde->dd; 479 480 for (index = 0; index < dd->num_sdma; index++) { 481 struct sdma_engine *curr_sdma = &dd->per_sdma[index]; 482 483 if (curr_sdma != sde) 484 curr_sdma->progress_check_head = 485 curr_sdma->descq_head; 486 } 487 dd_dev_err(sde->dd, 488 "SDMA engine %d - check scheduled\n", 489 sde->this_idx); 490 mod_timer(&sde->err_progress_check_timer, jiffies + 10); 491 } 492 } 493 494 static void sdma_err_progress_check(struct timer_list *t) 495 { 496 unsigned index; 497 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); 498 499 dd_dev_err(sde->dd, "SDE progress check event\n"); 500 for (index = 0; index < sde->dd->num_sdma; index++) { 501 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; 502 unsigned long flags; 503 504 /* check progress on each engine except the current one */ 505 if (curr_sde == sde) 506 continue; 507 /* 508 * We must lock interrupts when acquiring sde->lock, 509 * to avoid a deadlock if interrupt triggers and spins on 510 * the same lock on same CPU 511 */ 512 spin_lock_irqsave(&curr_sde->tail_lock, flags); 513 write_seqlock(&curr_sde->head_lock); 514 515 /* skip non-running queues */ 516 if (curr_sde->state.current_state != sdma_state_s99_running) { 517 write_sequnlock(&curr_sde->head_lock); 518 spin_unlock_irqrestore(&curr_sde->tail_lock, flags); 519 continue; 520 } 521 522 if ((curr_sde->descq_head != curr_sde->descq_tail) && 523 (curr_sde->descq_head == 524 curr_sde->progress_check_head)) 525 __sdma_process_event(curr_sde, 526 sdma_event_e90_sw_halted); 527 write_sequnlock(&curr_sde->head_lock); 528 spin_unlock_irqrestore(&curr_sde->tail_lock, flags); 529 } 530 schedule_work(&sde->err_halt_worker); 531 } 532 533 static void sdma_hw_clean_up_task(unsigned long opaque) 534 { 535 struct sdma_engine *sde = (struct sdma_engine *)opaque; 536 u64 statuscsr; 537 538 while (1) { 539 #ifdef CONFIG_SDMA_VERBOSITY 540 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 541 sde->this_idx, slashstrip(__FILE__), __LINE__, 542 __func__); 543 #endif 544 statuscsr = read_sde_csr(sde, SD(STATUS)); 545 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK); 546 if (statuscsr) 547 break; 548 udelay(10); 549 } 550 551 sdma_process_event(sde, 
sdma_event_e25_hw_clean_up_done); 552 } 553 554 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) 555 { 556 return sde->tx_ring[sde->tx_head & sde->sdma_mask]; 557 } 558 559 /* 560 * flush ring for recovery 561 */ 562 static void sdma_flush_descq(struct sdma_engine *sde) 563 { 564 u16 head, tail; 565 int progress = 0; 566 struct sdma_txreq *txp = get_txhead(sde); 567 568 /* The reason for some of the complexity of this code is that 569 * not all descriptors have corresponding txps. So, we have to 570 * be able to skip over descs until we wander into the range of 571 * the next txp on the list. 572 */ 573 head = sde->descq_head & sde->sdma_mask; 574 tail = sde->descq_tail & sde->sdma_mask; 575 while (head != tail) { 576 /* advance head, wrap if needed */ 577 head = ++sde->descq_head & sde->sdma_mask; 578 /* if now past this txp's descs, do the callback */ 579 if (txp && txp->next_descq_idx == head) { 580 /* remove from list */ 581 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; 582 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); 583 trace_hfi1_sdma_progress(sde, head, tail, txp); 584 txp = get_txhead(sde); 585 } 586 progress++; 587 } 588 if (progress) 589 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 590 } 591 592 static void sdma_sw_clean_up_task(unsigned long opaque) 593 { 594 struct sdma_engine *sde = (struct sdma_engine *)opaque; 595 unsigned long flags; 596 597 spin_lock_irqsave(&sde->tail_lock, flags); 598 write_seqlock(&sde->head_lock); 599 600 /* 601 * At this point, the following should always be true: 602 * - We are halted, so no more descriptors are getting retired. 603 * - We are not running, so no one is submitting new work. 604 * - Only we can send the e40_sw_cleaned, so we can't start 605 * running again until we say so. So, the active list and 606 * descq are ours to play with. 607 */ 608 609 /* 610 * In the error clean up sequence, software clean must be called 611 * before the hardware clean so we can use the hardware head in 612 * the progress routine. A hardware clean or SPC unfreeze will 613 * reset the hardware head. 614 * 615 * Process all retired requests. The progress routine will use the 616 * latest physical hardware head - we are not running so speed does 617 * not matter. 618 */ 619 sdma_make_progress(sde, 0); 620 621 sdma_flush(sde); 622 623 /* 624 * Reset our notion of head and tail. 625 * Note that the HW registers have been reset via an earlier 626 * clean up. 627 */ 628 sde->descq_tail = 0; 629 sde->descq_head = 0; 630 sde->desc_avail = sdma_descq_freecnt(sde); 631 *sde->head_dma = 0; 632 633 __sdma_process_event(sde, sdma_event_e40_sw_cleaned); 634 635 write_sequnlock(&sde->head_lock); 636 spin_unlock_irqrestore(&sde->tail_lock, flags); 637 } 638 639 static void sdma_sw_tear_down(struct sdma_engine *sde) 640 { 641 struct sdma_state *ss = &sde->state; 642 643 /* Releasing this reference means the state machine has stopped. 
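 * This is the usual kref-plus-completion teardown pattern: the initial
 * reference is taken with kref_init() in sdma_init(), the state machine
 * brackets its work with sdma_get()/sdma_put(), and sdma_finalput()
 * drops the final reference and then blocks in wait_for_completion()
 * until sdma_complete() signals the completion from the kref release
 * callback.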
*/ 644 sdma_put(ss); 645 646 /* stop waiting for all unfreeze events to complete */ 647 atomic_set(&sde->dd->sdma_unfreeze_count, -1); 648 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 649 } 650 651 static void sdma_start_hw_clean_up(struct sdma_engine *sde) 652 { 653 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); 654 } 655 656 static void sdma_set_state(struct sdma_engine *sde, 657 enum sdma_states next_state) 658 { 659 struct sdma_state *ss = &sde->state; 660 const struct sdma_set_state_action *action = sdma_action_table; 661 unsigned op = 0; 662 663 trace_hfi1_sdma_state( 664 sde, 665 sdma_state_names[ss->current_state], 666 sdma_state_names[next_state]); 667 668 /* debugging bookkeeping */ 669 ss->previous_state = ss->current_state; 670 ss->previous_op = ss->current_op; 671 ss->current_state = next_state; 672 673 if (ss->previous_state != sdma_state_s99_running && 674 next_state == sdma_state_s99_running) 675 sdma_flush(sde); 676 677 if (action[next_state].op_enable) 678 op |= SDMA_SENDCTRL_OP_ENABLE; 679 680 if (action[next_state].op_intenable) 681 op |= SDMA_SENDCTRL_OP_INTENABLE; 682 683 if (action[next_state].op_halt) 684 op |= SDMA_SENDCTRL_OP_HALT; 685 686 if (action[next_state].op_cleanup) 687 op |= SDMA_SENDCTRL_OP_CLEANUP; 688 689 if (action[next_state].go_s99_running_tofalse) 690 ss->go_s99_running = 0; 691 692 if (action[next_state].go_s99_running_totrue) 693 ss->go_s99_running = 1; 694 695 ss->current_op = op; 696 sdma_sendctrl(sde, ss->current_op); 697 } 698 699 /** 700 * sdma_get_descq_cnt() - called when device probed 701 * 702 * Return a validated descq count. 703 * 704 * This is currently only used in the verbs initialization to build the tx 705 * list. 706 * 707 * This will probably be deleted in favor of a more scalable approach to 708 * alloc tx's. 709 * 710 */ 711 u16 sdma_get_descq_cnt(void) 712 { 713 u16 count = sdma_descq_cnt; 714 715 if (!count) 716 return SDMA_DESCQ_CNT; 717 /* count must be a power of 2 greater than 64 and less than 718 * 32768. Otherwise return default. 719 */ 720 if (!is_power_of_2(count)) 721 return SDMA_DESCQ_CNT; 722 if (count < 64 || count > 32768) 723 return SDMA_DESCQ_CNT; 724 return count; 725 } 726 727 /** 728 * sdma_engine_get_vl() - return vl for a given sdma engine 729 * @sde: sdma engine 730 * 731 * This function returns the vl mapped to a given engine, or an error if 732 * the mapping can't be found. The mapping fields are protected by RCU. 733 */ 734 int sdma_engine_get_vl(struct sdma_engine *sde) 735 { 736 struct hfi1_devdata *dd = sde->dd; 737 struct sdma_vl_map *m; 738 u8 vl; 739 740 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) 741 return -EINVAL; 742 743 rcu_read_lock(); 744 m = rcu_dereference(dd->sdma_map); 745 if (unlikely(!m)) { 746 rcu_read_unlock(); 747 return -EINVAL; 748 } 749 vl = m->engine_to_vl[sde->this_idx]; 750 rcu_read_unlock(); 751 752 return vl; 753 } 754 755 /** 756 * sdma_select_engine_vl() - select sdma engine 757 * @dd: devdata 758 * @selector: a spreading factor 759 * @vl: this vl 760 * 761 * 762 * This function returns an engine based on the selector and a vl. The 763 * mapping fields are protected by RCU. 
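 *
 * The lookup itself is two masked table indexes under rcu_read_lock(),
 * roughly (illustrative sketch of the code below):
 *
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];
 *	sde = e->sde[selector & e->mask];
 *
 * A missing map or an out-of-range vl falls back to engine 0.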
764 */ 765 struct sdma_engine *sdma_select_engine_vl( 766 struct hfi1_devdata *dd, 767 u32 selector, 768 u8 vl) 769 { 770 struct sdma_vl_map *m; 771 struct sdma_map_elem *e; 772 struct sdma_engine *rval; 773 774 /* NOTE This should only happen if SC->VL changed after the initial 775 * checks on the QP/AH 776 * Default will return engine 0 below 777 */ 778 if (vl >= num_vls) { 779 rval = NULL; 780 goto done; 781 } 782 783 rcu_read_lock(); 784 m = rcu_dereference(dd->sdma_map); 785 if (unlikely(!m)) { 786 rcu_read_unlock(); 787 return &dd->per_sdma[0]; 788 } 789 e = m->map[vl & m->mask]; 790 rval = e->sde[selector & e->mask]; 791 rcu_read_unlock(); 792 793 done: 794 rval = !rval ? &dd->per_sdma[0] : rval; 795 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx); 796 return rval; 797 } 798 799 /** 800 * sdma_select_engine_sc() - select sdma engine 801 * @dd: devdata 802 * @selector: a spreading factor 803 * @sc5: the 5 bit sc 804 * 805 * 806 * This function returns an engine based on the selector and an sc. 807 */ 808 struct sdma_engine *sdma_select_engine_sc( 809 struct hfi1_devdata *dd, 810 u32 selector, 811 u8 sc5) 812 { 813 u8 vl = sc_to_vlt(dd, sc5); 814 815 return sdma_select_engine_vl(dd, selector, vl); 816 } 817 818 struct sdma_rht_map_elem { 819 u32 mask; 820 u8 ctr; 821 struct sdma_engine *sde[0]; 822 }; 823 824 struct sdma_rht_node { 825 unsigned long cpu_id; 826 struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED]; 827 struct rhash_head node; 828 }; 829 830 #define NR_CPUS_HINT 192 831 832 static const struct rhashtable_params sdma_rht_params = { 833 .nelem_hint = NR_CPUS_HINT, 834 .head_offset = offsetof(struct sdma_rht_node, node), 835 .key_offset = offsetof(struct sdma_rht_node, cpu_id), 836 .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), 837 .max_size = NR_CPUS, 838 .min_size = 8, 839 .automatic_shrinking = true, 840 }; 841 842 /* 843 * sdma_select_user_engine() - select sdma engine based on user setup 844 * @dd: devdata 845 * @selector: a spreading factor 846 * @vl: this vl 847 * 848 * This function returns an sdma engine for a user sdma request. 849 * User defined sdma engine affinity setting is honored when applicable, 850 * otherwise system default sdma engine mapping is used. To ensure correct 851 * ordering, the mapping from <selector, vl> to sde must remain unchanged. 852 */ 853 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, 854 u32 selector, u8 vl) 855 { 856 struct sdma_rht_node *rht_node; 857 struct sdma_engine *sde = NULL; 858 const struct cpumask *current_mask = ¤t->cpus_allowed; 859 unsigned long cpu_id; 860 861 /* 862 * To ensure that always the same sdma engine(s) will be 863 * selected make sure the process is pinned to this CPU only. 864 */ 865 if (cpumask_weight(current_mask) != 1) 866 goto out; 867 868 cpu_id = smp_processor_id(); 869 rcu_read_lock(); 870 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id, 871 sdma_rht_params); 872 873 if (rht_node && rht_node->map[vl]) { 874 struct sdma_rht_map_elem *map = rht_node->map[vl]; 875 876 sde = map->sde[selector & map->mask]; 877 } 878 rcu_read_unlock(); 879 880 if (sde) 881 return sde; 882 883 out: 884 return sdma_select_engine_vl(dd, selector, vl); 885 } 886 887 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map) 888 { 889 int i; 890 891 for (i = 0; i < roundup_pow_of_two(map->ctr ? 
: 1) - map->ctr; i++) 892 map->sde[map->ctr + i] = map->sde[i]; 893 } 894 895 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map, 896 struct sdma_engine *sde) 897 { 898 unsigned int i, pow; 899 900 /* only need to check the first ctr entries for a match */ 901 for (i = 0; i < map->ctr; i++) { 902 if (map->sde[i] == sde) { 903 memmove(&map->sde[i], &map->sde[i + 1], 904 (map->ctr - i - 1) * sizeof(map->sde[0])); 905 map->ctr--; 906 pow = roundup_pow_of_two(map->ctr ? : 1); 907 map->mask = pow - 1; 908 sdma_populate_sde_map(map); 909 break; 910 } 911 } 912 } 913 914 /* 915 * Prevents concurrent reads and writes of the sdma engine cpu_mask 916 */ 917 static DEFINE_MUTEX(process_to_sde_mutex); 918 919 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, 920 size_t count) 921 { 922 struct hfi1_devdata *dd = sde->dd; 923 cpumask_var_t mask, new_mask; 924 unsigned long cpu; 925 int ret, vl, sz; 926 struct sdma_rht_node *rht_node; 927 928 vl = sdma_engine_get_vl(sde); 929 if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map))) 930 return -EINVAL; 931 932 ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 933 if (!ret) 934 return -ENOMEM; 935 936 ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL); 937 if (!ret) { 938 free_cpumask_var(mask); 939 return -ENOMEM; 940 } 941 ret = cpulist_parse(buf, mask); 942 if (ret) 943 goto out_free; 944 945 if (!cpumask_subset(mask, cpu_online_mask)) { 946 dd_dev_warn(sde->dd, "Invalid CPU mask\n"); 947 ret = -EINVAL; 948 goto out_free; 949 } 950 951 sz = sizeof(struct sdma_rht_map_elem) + 952 (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *)); 953 954 mutex_lock(&process_to_sde_mutex); 955 956 for_each_cpu(cpu, mask) { 957 /* Check if we have this already mapped */ 958 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { 959 cpumask_set_cpu(cpu, new_mask); 960 continue; 961 } 962 963 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, 964 sdma_rht_params); 965 if (!rht_node) { 966 rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL); 967 if (!rht_node) { 968 ret = -ENOMEM; 969 goto out; 970 } 971 972 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); 973 if (!rht_node->map[vl]) { 974 kfree(rht_node); 975 ret = -ENOMEM; 976 goto out; 977 } 978 rht_node->cpu_id = cpu; 979 rht_node->map[vl]->mask = 0; 980 rht_node->map[vl]->ctr = 1; 981 rht_node->map[vl]->sde[0] = sde; 982 983 ret = rhashtable_insert_fast(dd->sdma_rht, 984 &rht_node->node, 985 sdma_rht_params); 986 if (ret) { 987 kfree(rht_node->map[vl]); 988 kfree(rht_node); 989 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", 990 cpu); 991 goto out; 992 } 993 994 } else { 995 int ctr, pow; 996 997 /* Add new user mappings */ 998 if (!rht_node->map[vl]) 999 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); 1000 1001 if (!rht_node->map[vl]) { 1002 ret = -ENOMEM; 1003 goto out; 1004 } 1005 1006 rht_node->map[vl]->ctr++; 1007 ctr = rht_node->map[vl]->ctr; 1008 rht_node->map[vl]->sde[ctr - 1] = sde; 1009 pow = roundup_pow_of_two(ctr); 1010 rht_node->map[vl]->mask = pow - 1; 1011 1012 /* Populate the sde map table */ 1013 sdma_populate_sde_map(rht_node->map[vl]); 1014 } 1015 cpumask_set_cpu(cpu, new_mask); 1016 } 1017 1018 /* Clean up old mappings */ 1019 for_each_cpu(cpu, cpu_online_mask) { 1020 struct sdma_rht_node *rht_node; 1021 1022 /* Don't cleanup sdes that are set in the new mask */ 1023 if (cpumask_test_cpu(cpu, mask)) 1024 continue; 1025 1026 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, 1027 sdma_rht_params); 1028 if (rht_node) { 1029 bool empty = true; 1030 int i; 
1031 1032 /* Remove mappings for old sde */ 1033 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1034 if (rht_node->map[i]) 1035 sdma_cleanup_sde_map(rht_node->map[i], 1036 sde); 1037 1038 /* Free empty hash table entries */ 1039 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { 1040 if (!rht_node->map[i]) 1041 continue; 1042 1043 if (rht_node->map[i]->ctr) { 1044 empty = false; 1045 break; 1046 } 1047 } 1048 1049 if (empty) { 1050 ret = rhashtable_remove_fast(dd->sdma_rht, 1051 &rht_node->node, 1052 sdma_rht_params); 1053 WARN_ON(ret); 1054 1055 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1056 kfree(rht_node->map[i]); 1057 1058 kfree(rht_node); 1059 } 1060 } 1061 } 1062 1063 cpumask_copy(&sde->cpu_mask, new_mask); 1064 out: 1065 mutex_unlock(&process_to_sde_mutex); 1066 out_free: 1067 free_cpumask_var(mask); 1068 free_cpumask_var(new_mask); 1069 return ret ? : strnlen(buf, PAGE_SIZE); 1070 } 1071 1072 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) 1073 { 1074 mutex_lock(&process_to_sde_mutex); 1075 if (cpumask_empty(&sde->cpu_mask)) 1076 snprintf(buf, PAGE_SIZE, "%s\n", "empty"); 1077 else 1078 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); 1079 mutex_unlock(&process_to_sde_mutex); 1080 return strnlen(buf, PAGE_SIZE); 1081 } 1082 1083 static void sdma_rht_free(void *ptr, void *arg) 1084 { 1085 struct sdma_rht_node *rht_node = ptr; 1086 int i; 1087 1088 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1089 kfree(rht_node->map[i]); 1090 1091 kfree(rht_node); 1092 } 1093 1094 /** 1095 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings 1096 * @s: seq file 1097 * @dd: hfi1_devdata 1098 * @cpuid: cpu id 1099 * 1100 * This routine dumps the process to sde mappings per cpu 1101 */ 1102 void sdma_seqfile_dump_cpu_list(struct seq_file *s, 1103 struct hfi1_devdata *dd, 1104 unsigned long cpuid) 1105 { 1106 struct sdma_rht_node *rht_node; 1107 int i, j; 1108 1109 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid, 1110 sdma_rht_params); 1111 if (!rht_node) 1112 return; 1113 1114 seq_printf(s, "cpu%3lu: ", cpuid); 1115 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { 1116 if (!rht_node->map[i] || !rht_node->map[i]->ctr) 1117 continue; 1118 1119 seq_printf(s, " vl%d: [", i); 1120 1121 for (j = 0; j < rht_node->map[i]->ctr; j++) { 1122 if (!rht_node->map[i]->sde[j]) 1123 continue; 1124 1125 if (j > 0) 1126 seq_puts(s, ","); 1127 1128 seq_printf(s, " sdma%2d", 1129 rht_node->map[i]->sde[j]->this_idx); 1130 } 1131 seq_puts(s, " ]"); 1132 } 1133 1134 seq_puts(s, "\n"); 1135 } 1136 1137 /* 1138 * Free the indicated map struct 1139 */ 1140 static void sdma_map_free(struct sdma_vl_map *m) 1141 { 1142 int i; 1143 1144 for (i = 0; m && i < m->actual_vls; i++) 1145 kfree(m->map[i]); 1146 kfree(m); 1147 } 1148 1149 /* 1150 * Handle RCU callback 1151 */ 1152 static void sdma_map_rcu_callback(struct rcu_head *list) 1153 { 1154 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list); 1155 1156 sdma_map_free(m); 1157 } 1158 1159 /** 1160 * sdma_map_init - called when # vls change 1161 * @dd: hfi1_devdata 1162 * @port: port number 1163 * @num_vls: number of vls 1164 * @vl_engines: per vl engine mapping (optional) 1165 * 1166 * This routine changes the mapping based on the number of vls. 1167 * 1168 * vl_engines is used to specify a non-uniform vl/engine loading. NULL 1169 * implies auto computing the loading and giving each VLs a uniform 1170 * distribution of engines per VL. 
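 *
 * As a worked example of the auto algorithm described next (the numbers
 * are illustrative only): with 16 engines and 5 VLs, sde_per_vl is 3
 * with 1 extra engine, so VL4 is loaded with 4 engines and VLs 0-3 with
 * 3 engines each.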
1171 * 1172 * The auto algorithm computes the sde_per_vl and the number of extra 1173 * engines. Any extra engines are added from the last VL on down. 1174 * 1175 * rcu locking is used here to control access to the mapping fields. 1176 * 1177 * If either the num_vls or num_sdma are non-power of 2, the array sizes 1178 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded 1179 * up to the next highest power of 2 and the first entry is reused 1180 * in a round robin fashion. 1181 * 1182 * If an error occurs the map change is not done and the mapping is 1183 * not changed. 1184 * 1185 */ 1186 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) 1187 { 1188 int i, j; 1189 int extra, sde_per_vl; 1190 int engine = 0; 1191 u8 lvl_engines[OPA_MAX_VLS]; 1192 struct sdma_vl_map *oldmap, *newmap; 1193 1194 if (!(dd->flags & HFI1_HAS_SEND_DMA)) 1195 return 0; 1196 1197 if (!vl_engines) { 1198 /* truncate divide */ 1199 sde_per_vl = dd->num_sdma / num_vls; 1200 /* extras */ 1201 extra = dd->num_sdma % num_vls; 1202 vl_engines = lvl_engines; 1203 /* add extras from last vl down */ 1204 for (i = num_vls - 1; i >= 0; i--, extra--) 1205 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0); 1206 } 1207 /* build new map */ 1208 newmap = kzalloc( 1209 sizeof(struct sdma_vl_map) + 1210 roundup_pow_of_two(num_vls) * 1211 sizeof(struct sdma_map_elem *), 1212 GFP_KERNEL); 1213 if (!newmap) 1214 goto bail; 1215 newmap->actual_vls = num_vls; 1216 newmap->vls = roundup_pow_of_two(num_vls); 1217 newmap->mask = (1 << ilog2(newmap->vls)) - 1; 1218 /* initialize back-map */ 1219 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) 1220 newmap->engine_to_vl[i] = -1; 1221 for (i = 0; i < newmap->vls; i++) { 1222 /* save for wrap around */ 1223 int first_engine = engine; 1224 1225 if (i < newmap->actual_vls) { 1226 int sz = roundup_pow_of_two(vl_engines[i]); 1227 1228 /* only allocate once */ 1229 newmap->map[i] = kzalloc( 1230 sizeof(struct sdma_map_elem) + 1231 sz * sizeof(struct sdma_engine *), 1232 GFP_KERNEL); 1233 if (!newmap->map[i]) 1234 goto bail; 1235 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; 1236 /* assign engines */ 1237 for (j = 0; j < sz; j++) { 1238 newmap->map[i]->sde[j] = 1239 &dd->per_sdma[engine]; 1240 if (++engine >= first_engine + vl_engines[i]) 1241 /* wrap back to first engine */ 1242 engine = first_engine; 1243 } 1244 /* assign back-map */ 1245 for (j = 0; j < vl_engines[i]; j++) 1246 newmap->engine_to_vl[first_engine + j] = i; 1247 } else { 1248 /* just re-use entry without allocating */ 1249 newmap->map[i] = newmap->map[i % num_vls]; 1250 } 1251 engine = first_engine + vl_engines[i]; 1252 } 1253 /* newmap in hand, save old map */ 1254 spin_lock_irq(&dd->sde_map_lock); 1255 oldmap = rcu_dereference_protected(dd->sdma_map, 1256 lockdep_is_held(&dd->sde_map_lock)); 1257 1258 /* publish newmap */ 1259 rcu_assign_pointer(dd->sdma_map, newmap); 1260 1261 spin_unlock_irq(&dd->sde_map_lock); 1262 /* success, free any old map after grace period */ 1263 if (oldmap) 1264 call_rcu(&oldmap->list, sdma_map_rcu_callback); 1265 return 0; 1266 bail: 1267 /* free any partial allocation */ 1268 sdma_map_free(newmap); 1269 return -ENOMEM; 1270 } 1271 1272 /** 1273 * sdma_clean() Clean up allocated memory 1274 * @dd: struct hfi1_devdata 1275 * @num_engines: num sdma engines 1276 * 1277 * This routine can be called regardless of the success of 1278 * sdma_init() 1279 */ 1280 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) 1281 { 1282 size_t i; 1283 struct sdma_engine *sde; 
1284 1285 if (dd->sdma_pad_dma) { 1286 dma_free_coherent(&dd->pcidev->dev, 4, 1287 (void *)dd->sdma_pad_dma, 1288 dd->sdma_pad_phys); 1289 dd->sdma_pad_dma = NULL; 1290 dd->sdma_pad_phys = 0; 1291 } 1292 if (dd->sdma_heads_dma) { 1293 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size, 1294 (void *)dd->sdma_heads_dma, 1295 dd->sdma_heads_phys); 1296 dd->sdma_heads_dma = NULL; 1297 dd->sdma_heads_phys = 0; 1298 } 1299 for (i = 0; dd->per_sdma && i < num_engines; ++i) { 1300 sde = &dd->per_sdma[i]; 1301 1302 sde->head_dma = NULL; 1303 sde->head_phys = 0; 1304 1305 if (sde->descq) { 1306 dma_free_coherent( 1307 &dd->pcidev->dev, 1308 sde->descq_cnt * sizeof(u64[2]), 1309 sde->descq, 1310 sde->descq_phys 1311 ); 1312 sde->descq = NULL; 1313 sde->descq_phys = 0; 1314 } 1315 kvfree(sde->tx_ring); 1316 sde->tx_ring = NULL; 1317 } 1318 spin_lock_irq(&dd->sde_map_lock); 1319 sdma_map_free(rcu_access_pointer(dd->sdma_map)); 1320 RCU_INIT_POINTER(dd->sdma_map, NULL); 1321 spin_unlock_irq(&dd->sde_map_lock); 1322 synchronize_rcu(); 1323 kfree(dd->per_sdma); 1324 dd->per_sdma = NULL; 1325 1326 if (dd->sdma_rht) { 1327 rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL); 1328 kfree(dd->sdma_rht); 1329 dd->sdma_rht = NULL; 1330 } 1331 } 1332 1333 /** 1334 * sdma_init() - called when device probed 1335 * @dd: hfi1_devdata 1336 * @port: port number (currently only zero) 1337 * 1338 * Initializes each sde and its csrs. 1339 * Interrupts are not required to be enabled. 1340 * 1341 * Returns: 1342 * 0 - success, -errno on failure 1343 */ 1344 int sdma_init(struct hfi1_devdata *dd, u8 port) 1345 { 1346 unsigned this_idx; 1347 struct sdma_engine *sde; 1348 struct rhashtable *tmp_sdma_rht; 1349 u16 descq_cnt; 1350 void *curr_head; 1351 struct hfi1_pportdata *ppd = dd->pport + port; 1352 u32 per_sdma_credits; 1353 uint idle_cnt = sdma_idle_cnt; 1354 size_t num_engines = chip_sdma_engines(dd); 1355 int ret = -ENOMEM; 1356 1357 if (!HFI1_CAP_IS_KSET(SDMA)) { 1358 HFI1_CAP_CLEAR(SDMA_AHG); 1359 return 0; 1360 } 1361 if (mod_num_sdma && 1362 /* can't exceed chip support */ 1363 mod_num_sdma <= chip_sdma_engines(dd) && 1364 /* count must be >= vls */ 1365 mod_num_sdma >= num_vls) 1366 num_engines = mod_num_sdma; 1367 1368 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma); 1369 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd)); 1370 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n", 1371 chip_sdma_mem_size(dd)); 1372 1373 per_sdma_credits = 1374 chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE); 1375 1376 /* set up freeze waitqueue */ 1377 init_waitqueue_head(&dd->sdma_unfreeze_wq); 1378 atomic_set(&dd->sdma_unfreeze_count, 0); 1379 1380 descq_cnt = sdma_get_descq_cnt(); 1381 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n", 1382 num_engines, descq_cnt); 1383 1384 /* alloc memory for array of send engines */ 1385 dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma), 1386 GFP_KERNEL, dd->node); 1387 if (!dd->per_sdma) 1388 return ret; 1389 1390 idle_cnt = ns_to_cclock(dd, idle_cnt); 1391 if (idle_cnt) 1392 dd->default_desc1 = 1393 SDMA_DESC1_HEAD_TO_HOST_FLAG; 1394 else 1395 dd->default_desc1 = 1396 SDMA_DESC1_INT_REQ_FLAG; 1397 1398 if (!sdma_desct_intr) 1399 sdma_desct_intr = SDMA_DESC_INTR; 1400 1401 /* Allocate memory for SendDMA descriptor FIFOs */ 1402 for (this_idx = 0; this_idx < num_engines; ++this_idx) { 1403 sde = &dd->per_sdma[this_idx]; 1404 sde->dd = dd; 1405 sde->ppd = ppd; 1406 sde->this_idx = this_idx; 1407 sde->descq_cnt = descq_cnt; 1408 
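		/*
		 * Ring indexing note (illustrative): descq_cnt is a power of
		 * two, so with the default of 2048 entries sdma_shift is 11
		 * and sdma_mask is 0x7ff.  descq_head, descq_tail and
		 * tx_head are free-running counters that are masked with
		 * sdma_mask on each use to form the actual ring index.
		 */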
sde->desc_avail = sdma_descq_freecnt(sde); 1409 sde->sdma_shift = ilog2(descq_cnt); 1410 sde->sdma_mask = (1 << sde->sdma_shift) - 1; 1411 1412 /* Create a mask specifically for each interrupt source */ 1413 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + 1414 this_idx); 1415 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + 1416 this_idx); 1417 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + 1418 this_idx); 1419 /* Create a combined mask to cover all 3 interrupt sources */ 1420 sde->imask = sde->int_mask | sde->progress_mask | 1421 sde->idle_mask; 1422 1423 spin_lock_init(&sde->tail_lock); 1424 seqlock_init(&sde->head_lock); 1425 spin_lock_init(&sde->senddmactrl_lock); 1426 spin_lock_init(&sde->flushlist_lock); 1427 seqlock_init(&sde->waitlock); 1428 /* insure there is always a zero bit */ 1429 sde->ahg_bits = 0xfffffffe00000000ULL; 1430 1431 sdma_set_state(sde, sdma_state_s00_hw_down); 1432 1433 /* set up reference counting */ 1434 kref_init(&sde->state.kref); 1435 init_completion(&sde->state.comp); 1436 1437 INIT_LIST_HEAD(&sde->flushlist); 1438 INIT_LIST_HEAD(&sde->dmawait); 1439 1440 sde->tail_csr = 1441 get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); 1442 1443 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task, 1444 (unsigned long)sde); 1445 1446 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task, 1447 (unsigned long)sde); 1448 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); 1449 INIT_WORK(&sde->flush_worker, sdma_field_flush); 1450 1451 sde->progress_check_head = 0; 1452 1453 timer_setup(&sde->err_progress_check_timer, 1454 sdma_err_progress_check, 0); 1455 1456 sde->descq = dma_zalloc_coherent( 1457 &dd->pcidev->dev, 1458 descq_cnt * sizeof(u64[2]), 1459 &sde->descq_phys, 1460 GFP_KERNEL 1461 ); 1462 if (!sde->descq) 1463 goto bail; 1464 sde->tx_ring = 1465 kvzalloc_node(array_size(descq_cnt, 1466 sizeof(struct sdma_txreq *)), 1467 GFP_KERNEL, dd->node); 1468 if (!sde->tx_ring) 1469 goto bail; 1470 } 1471 1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1473 /* Allocate memory for DMA of head registers to memory */ 1474 dd->sdma_heads_dma = dma_zalloc_coherent( 1475 &dd->pcidev->dev, 1476 dd->sdma_heads_size, 1477 &dd->sdma_heads_phys, 1478 GFP_KERNEL 1479 ); 1480 if (!dd->sdma_heads_dma) { 1481 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1482 goto bail; 1483 } 1484 1485 /* Allocate memory for pad */ 1486 dd->sdma_pad_dma = dma_zalloc_coherent( 1487 &dd->pcidev->dev, 1488 sizeof(u32), 1489 &dd->sdma_pad_phys, 1490 GFP_KERNEL 1491 ); 1492 if (!dd->sdma_pad_dma) { 1493 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1494 goto bail; 1495 } 1496 1497 /* assign each engine to different cacheline and init registers */ 1498 curr_head = (void *)dd->sdma_heads_dma; 1499 for (this_idx = 0; this_idx < num_engines; ++this_idx) { 1500 unsigned long phys_offset; 1501 1502 sde = &dd->per_sdma[this_idx]; 1503 1504 sde->head_dma = curr_head; 1505 curr_head += L1_CACHE_BYTES; 1506 phys_offset = (unsigned long)sde->head_dma - 1507 (unsigned long)dd->sdma_heads_dma; 1508 sde->head_phys = dd->sdma_heads_phys + phys_offset; 1509 init_sdma_regs(sde, per_sdma_credits, idle_cnt); 1510 } 1511 dd->flags |= HFI1_HAS_SEND_DMA; 1512 dd->flags |= idle_cnt ? 
HFI1_HAS_SDMA_TIMEOUT : 0; 1513 dd->num_sdma = num_engines; 1514 ret = sdma_map_init(dd, port, ppd->vls_operational, NULL); 1515 if (ret < 0) 1516 goto bail; 1517 1518 tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL); 1519 if (!tmp_sdma_rht) { 1520 ret = -ENOMEM; 1521 goto bail; 1522 } 1523 1524 ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); 1525 if (ret < 0) 1526 goto bail; 1527 dd->sdma_rht = tmp_sdma_rht; 1528 1529 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); 1530 return 0; 1531 1532 bail: 1533 sdma_clean(dd, num_engines); 1534 return ret; 1535 } 1536 1537 /** 1538 * sdma_all_running() - called when the link goes up 1539 * @dd: hfi1_devdata 1540 * 1541 * This routine moves all engines to the running state. 1542 */ 1543 void sdma_all_running(struct hfi1_devdata *dd) 1544 { 1545 struct sdma_engine *sde; 1546 unsigned int i; 1547 1548 /* move all engines to running */ 1549 for (i = 0; i < dd->num_sdma; ++i) { 1550 sde = &dd->per_sdma[i]; 1551 sdma_process_event(sde, sdma_event_e30_go_running); 1552 } 1553 } 1554 1555 /** 1556 * sdma_all_idle() - called when the link goes down 1557 * @dd: hfi1_devdata 1558 * 1559 * This routine moves all engines to the idle state. 1560 */ 1561 void sdma_all_idle(struct hfi1_devdata *dd) 1562 { 1563 struct sdma_engine *sde; 1564 unsigned int i; 1565 1566 /* idle all engines */ 1567 for (i = 0; i < dd->num_sdma; ++i) { 1568 sde = &dd->per_sdma[i]; 1569 sdma_process_event(sde, sdma_event_e70_go_idle); 1570 } 1571 } 1572 1573 /** 1574 * sdma_start() - called to kick off state processing for all engines 1575 * @dd: hfi1_devdata 1576 * 1577 * This routine is for kicking off the state processing for all required 1578 * sdma engines. Interrupts need to be working at this point. 1579 * 1580 */ 1581 void sdma_start(struct hfi1_devdata *dd) 1582 { 1583 unsigned i; 1584 struct sdma_engine *sde; 1585 1586 /* kick off the engines state processing */ 1587 for (i = 0; i < dd->num_sdma; ++i) { 1588 sde = &dd->per_sdma[i]; 1589 sdma_process_event(sde, sdma_event_e10_go_hw_start); 1590 } 1591 } 1592 1593 /** 1594 * sdma_exit() - used when module is removed 1595 * @dd: hfi1_devdata 1596 */ 1597 void sdma_exit(struct hfi1_devdata *dd) 1598 { 1599 unsigned this_idx; 1600 struct sdma_engine *sde; 1601 1602 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; 1603 ++this_idx) { 1604 sde = &dd->per_sdma[this_idx]; 1605 if (!list_empty(&sde->dmawait)) 1606 dd_dev_err(dd, "sde %u: dmawait list not empty!\n", 1607 sde->this_idx); 1608 sdma_process_event(sde, sdma_event_e00_go_hw_down); 1609 1610 del_timer_sync(&sde->err_progress_check_timer); 1611 1612 /* 1613 * This waits for the state machine to exit so it is not 1614 * necessary to kill the sdma_sw_clean_up_task to make sure 1615 * it is not running. 1616 */ 1617 sdma_finalput(&sde->state); 1618 } 1619 } 1620 1621 /* 1622 * unmap the indicated descriptor 1623 */ 1624 static inline void sdma_unmap_desc( 1625 struct hfi1_devdata *dd, 1626 struct sdma_desc *descp) 1627 { 1628 switch (sdma_mapping_type(descp)) { 1629 case SDMA_MAP_SINGLE: 1630 dma_unmap_single( 1631 &dd->pcidev->dev, 1632 sdma_mapping_addr(descp), 1633 sdma_mapping_len(descp), 1634 DMA_TO_DEVICE); 1635 break; 1636 case SDMA_MAP_PAGE: 1637 dma_unmap_page( 1638 &dd->pcidev->dev, 1639 sdma_mapping_addr(descp), 1640 sdma_mapping_len(descp), 1641 DMA_TO_DEVICE); 1642 break; 1643 } 1644 } 1645 1646 /* 1647 * return the mode as indicated by the first 1648 * descriptor in the tx. 
1649 */ 1650 static inline u8 ahg_mode(struct sdma_txreq *tx) 1651 { 1652 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK) 1653 >> SDMA_DESC1_HEADER_MODE_SHIFT; 1654 } 1655 1656 /** 1657 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's 1658 * @dd: hfi1_devdata for unmapping 1659 * @tx: tx request to clean 1660 * 1661 * This is used in the progress routine to clean the tx or 1662 * by the ULP to toss an in-process tx build. 1663 * 1664 * The code can be called multiple times without issue. 1665 * 1666 */ 1667 void __sdma_txclean( 1668 struct hfi1_devdata *dd, 1669 struct sdma_txreq *tx) 1670 { 1671 u16 i; 1672 1673 if (tx->num_desc) { 1674 u8 skip = 0, mode = ahg_mode(tx); 1675 1676 /* unmap first */ 1677 sdma_unmap_desc(dd, &tx->descp[0]); 1678 /* determine number of AHG descriptors to skip */ 1679 if (mode > SDMA_AHG_APPLY_UPDATE1) 1680 skip = mode >> 1; 1681 for (i = 1 + skip; i < tx->num_desc; i++) 1682 sdma_unmap_desc(dd, &tx->descp[i]); 1683 tx->num_desc = 0; 1684 } 1685 kfree(tx->coalesce_buf); 1686 tx->coalesce_buf = NULL; 1687 /* kmalloc'ed descp */ 1688 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) { 1689 tx->desc_limit = ARRAY_SIZE(tx->descs); 1690 kfree(tx->descp); 1691 } 1692 } 1693 1694 static inline u16 sdma_gethead(struct sdma_engine *sde) 1695 { 1696 struct hfi1_devdata *dd = sde->dd; 1697 int use_dmahead; 1698 u16 hwhead; 1699 1700 #ifdef CONFIG_SDMA_VERBOSITY 1701 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 1702 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 1703 #endif 1704 1705 retry: 1706 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && 1707 (dd->flags & HFI1_HAS_SDMA_TIMEOUT); 1708 hwhead = use_dmahead ? 1709 (u16)le64_to_cpu(*sde->head_dma) : 1710 (u16)read_sde_csr(sde, SD(HEAD)); 1711 1712 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { 1713 u16 cnt; 1714 u16 swtail; 1715 u16 swhead; 1716 int sane; 1717 1718 swhead = sde->descq_head & sde->sdma_mask; 1719 /* this code is really bad for cache line trading */ 1720 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 1721 cnt = sde->descq_cnt; 1722 1723 if (swhead < swtail) 1724 /* not wrapped */ 1725 sane = (hwhead >= swhead) & (hwhead <= swtail); 1726 else if (swhead > swtail) 1727 /* wrapped around */ 1728 sane = ((hwhead >= swhead) && (hwhead < cnt)) || 1729 (hwhead <= swtail); 1730 else 1731 /* empty */ 1732 sane = (hwhead == swhead); 1733 1734 if (unlikely(!sane)) { 1735 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n", 1736 sde->this_idx, 1737 use_dmahead ? "dma" : "kreg", 1738 hwhead, swhead, swtail, cnt); 1739 if (use_dmahead) { 1740 /* try one more time, using csr */ 1741 use_dmahead = 0; 1742 goto retry; 1743 } 1744 /* proceed as if no progress */ 1745 hwhead = swhead; 1746 } 1747 } 1748 return hwhead; 1749 } 1750 1751 /* 1752 * This is called when there are send DMA descriptors that might be 1753 * available. 1754 * 1755 * This is called with head_lock held. 
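 *
 * The dmawait list is sampled with an optimistic seqlock read and only
 * upgraded to the write side when there is at least one waiter, roughly
 * (illustrative sketch of the code below):
 *
 *	do {
 *		seq = read_seqbegin(&sde->waitlock);
 *		if (!list_empty(&sde->dmawait)) {
 *			write_seqlock(&sde->waitlock);
 *			// harvest up to SDMA_WAIT_BATCH_SIZE waiters
 *			write_sequnlock(&sde->waitlock);
 *			break;
 *		}
 *	} while (read_seqretry(&sde->waitlock, seq));
 *
 * The most starved waiter is woken first so that it gets the first
 * chance at the newly freed descriptors.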
 */
static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	uint i, n = 0, seq, max_idx = 0;
	u8 max_starved_cnt = 0;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&sde->waitlock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&sde->waitlock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
				wait,
				nw,
				&sde->dmawait,
				list) {
				u32 num_desc;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				num_desc = iowait_get_all_desc(wait);
				if (num_desc > avail)
					break;
				avail -= num_desc;
				/* Find the most starved wait member */
				iowait_starve_find_max(wait, &max_starved_cnt,
						       n, &max_idx);
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&sde->waitlock);
			break;
		}
	} while (read_seqretry(&sde->waitlock, seq));

	/* Schedule the most starved one first */
	if (n)
		waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);

	for (i = 0; i < n; i++)
		if (i != max_idx)
			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps. So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..
which is ensured by idle_check_done flag 1855 */ 1856 if ((status & sde->idle_mask) && !idle_check_done) { 1857 u16 swtail; 1858 1859 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 1860 if (swtail != hwhead) { 1861 hwhead = (u16)read_sde_csr(sde, SD(HEAD)); 1862 idle_check_done = 1; 1863 goto retry; 1864 } 1865 } 1866 1867 sde->last_status = status; 1868 if (progress) 1869 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 1870 } 1871 1872 /* 1873 * sdma_engine_interrupt() - interrupt handler for engine 1874 * @sde: sdma engine 1875 * @status: sdma interrupt reason 1876 * 1877 * Status is a mask of the 3 possible interrupts for this engine. It will 1878 * contain bits _only_ for this SDMA engine. It will contain at least one 1879 * bit, it may contain more. 1880 */ 1881 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) 1882 { 1883 trace_hfi1_sdma_engine_interrupt(sde, status); 1884 write_seqlock(&sde->head_lock); 1885 sdma_set_desc_cnt(sde, sdma_desct_intr); 1886 if (status & sde->idle_mask) 1887 sde->idle_int_cnt++; 1888 else if (status & sde->progress_mask) 1889 sde->progress_int_cnt++; 1890 else if (status & sde->int_mask) 1891 sde->sdma_int_cnt++; 1892 sdma_make_progress(sde, status); 1893 write_sequnlock(&sde->head_lock); 1894 } 1895 1896 /** 1897 * sdma_engine_error() - error handler for engine 1898 * @sde: sdma engine 1899 * @status: sdma interrupt reason 1900 */ 1901 void sdma_engine_error(struct sdma_engine *sde, u64 status) 1902 { 1903 unsigned long flags; 1904 1905 #ifdef CONFIG_SDMA_VERBOSITY 1906 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", 1907 sde->this_idx, 1908 (unsigned long long)status, 1909 sdma_state_names[sde->state.current_state]); 1910 #endif 1911 spin_lock_irqsave(&sde->tail_lock, flags); 1912 write_seqlock(&sde->head_lock); 1913 if (status & ALL_SDMA_ENG_HALT_ERRS) 1914 __sdma_process_event(sde, sdma_event_e60_hw_halted); 1915 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { 1916 dd_dev_err(sde->dd, 1917 "SDMA (%u) engine error: 0x%llx state %s\n", 1918 sde->this_idx, 1919 (unsigned long long)status, 1920 sdma_state_names[sde->state.current_state]); 1921 dump_sdma_state(sde); 1922 } 1923 write_sequnlock(&sde->head_lock); 1924 spin_unlock_irqrestore(&sde->tail_lock, flags); 1925 } 1926 1927 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) 1928 { 1929 u64 set_senddmactrl = 0; 1930 u64 clr_senddmactrl = 0; 1931 unsigned long flags; 1932 1933 #ifdef CONFIG_SDMA_VERBOSITY 1934 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", 1935 sde->this_idx, 1936 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0, 1937 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0, 1938 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0, 1939 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 
1 : 0); 1940 #endif 1941 1942 if (op & SDMA_SENDCTRL_OP_ENABLE) 1943 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); 1944 else 1945 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); 1946 1947 if (op & SDMA_SENDCTRL_OP_INTENABLE) 1948 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); 1949 else 1950 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); 1951 1952 if (op & SDMA_SENDCTRL_OP_HALT) 1953 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); 1954 else 1955 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); 1956 1957 spin_lock_irqsave(&sde->senddmactrl_lock, flags); 1958 1959 sde->p_senddmactrl |= set_senddmactrl; 1960 sde->p_senddmactrl &= ~clr_senddmactrl; 1961 1962 if (op & SDMA_SENDCTRL_OP_CLEANUP) 1963 write_sde_csr(sde, SD(CTRL), 1964 sde->p_senddmactrl | 1965 SD(CTRL_SDMA_CLEANUP_SMASK)); 1966 else 1967 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); 1968 1969 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); 1970 1971 #ifdef CONFIG_SDMA_VERBOSITY 1972 sdma_dumpstate(sde); 1973 #endif 1974 } 1975 1976 static void sdma_setlengen(struct sdma_engine *sde) 1977 { 1978 #ifdef CONFIG_SDMA_VERBOSITY 1979 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 1980 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 1981 #endif 1982 1983 /* 1984 * Set SendDmaLenGen and clear-then-set the MSB of the generation 1985 * count to enable generation checking and load the internal 1986 * generation counter. 1987 */ 1988 write_sde_csr(sde, SD(LEN_GEN), 1989 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); 1990 write_sde_csr(sde, SD(LEN_GEN), 1991 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | 1992 (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); 1993 } 1994 1995 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) 1996 { 1997 /* Commit writes to memory and advance the tail on the chip */ 1998 smp_wmb(); /* see get_txhead() */ 1999 writeq(tail, sde->tail_csr); 2000 } 2001 2002 /* 2003 * This is called when changing to state s10_hw_start_up_halt_wait as 2004 * a result of send buffer errors or send DMA descriptor errors. 2005 */ 2006 static void sdma_hw_start_up(struct sdma_engine *sde) 2007 { 2008 u64 reg; 2009 2010 #ifdef CONFIG_SDMA_VERBOSITY 2011 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2012 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2013 #endif 2014 2015 sdma_setlengen(sde); 2016 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2017 *sde->head_dma = 0; 2018 2019 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << 2020 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); 2021 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2022 } 2023 2024 /* 2025 * set_sdma_integrity 2026 * 2027 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. 
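 * The value written is simply whatever hfi1_pkt_base_sdma_integrity()
 * reports for this device, so every engine ends up programmed with the
 * same baseline set of packet integrity checks.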
2028 */ 2029 static void set_sdma_integrity(struct sdma_engine *sde) 2030 { 2031 struct hfi1_devdata *dd = sde->dd; 2032 2033 write_sde_csr(sde, SD(CHECK_ENABLE), 2034 hfi1_pkt_base_sdma_integrity(dd)); 2035 } 2036 2037 static void init_sdma_regs( 2038 struct sdma_engine *sde, 2039 u32 credits, 2040 uint idle_cnt) 2041 { 2042 u8 opval, opmask; 2043 #ifdef CONFIG_SDMA_VERBOSITY 2044 struct hfi1_devdata *dd = sde->dd; 2045 2046 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2047 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2048 #endif 2049 2050 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); 2051 sdma_setlengen(sde); 2052 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2053 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); 2054 write_sde_csr(sde, SD(DESC_CNT), 0); 2055 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); 2056 write_sde_csr(sde, SD(MEMORY), 2057 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | 2058 ((u64)(credits * sde->this_idx) << 2059 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); 2060 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); 2061 set_sdma_integrity(sde); 2062 opmask = OPCODE_CHECK_MASK_DISABLED; 2063 opval = OPCODE_CHECK_VAL_DISABLED; 2064 write_sde_csr(sde, SD(CHECK_OPCODE), 2065 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | 2066 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); 2067 } 2068 2069 #ifdef CONFIG_SDMA_VERBOSITY 2070 2071 #define sdma_dumpstate_helper0(reg) do { \ 2072 csr = read_csr(sde->dd, reg); \ 2073 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ 2074 } while (0) 2075 2076 #define sdma_dumpstate_helper(reg) do { \ 2077 csr = read_sde_csr(sde, reg); \ 2078 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ 2079 #reg, sde->this_idx, csr); \ 2080 } while (0) 2081 2082 #define sdma_dumpstate_helper2(reg) do { \ 2083 csr = read_csr(sde->dd, reg + (8 * i)); \ 2084 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ 2085 #reg, i, csr); \ 2086 } while (0) 2087 2088 void sdma_dumpstate(struct sdma_engine *sde) 2089 { 2090 u64 csr; 2091 unsigned i; 2092 2093 sdma_dumpstate_helper(SD(CTRL)); 2094 sdma_dumpstate_helper(SD(STATUS)); 2095 sdma_dumpstate_helper0(SD(ERR_STATUS)); 2096 sdma_dumpstate_helper0(SD(ERR_MASK)); 2097 sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); 2098 sdma_dumpstate_helper(SD(ENG_ERR_MASK)); 2099 2100 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { 2101 sdma_dumpstate_helper2(CCE_INT_STATUS); 2102 sdma_dumpstate_helper2(CCE_INT_MASK); 2103 sdma_dumpstate_helper2(CCE_INT_BLOCKED); 2104 } 2105 2106 sdma_dumpstate_helper(SD(TAIL)); 2107 sdma_dumpstate_helper(SD(HEAD)); 2108 sdma_dumpstate_helper(SD(PRIORITY_THLD)); 2109 sdma_dumpstate_helper(SD(IDLE_CNT)); 2110 sdma_dumpstate_helper(SD(RELOAD_CNT)); 2111 sdma_dumpstate_helper(SD(DESC_CNT)); 2112 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); 2113 sdma_dumpstate_helper(SD(MEMORY)); 2114 sdma_dumpstate_helper0(SD(ENGINES)); 2115 sdma_dumpstate_helper0(SD(MEM_SIZE)); 2116 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ 2117 sdma_dumpstate_helper(SD(BASE_ADDR)); 2118 sdma_dumpstate_helper(SD(LEN_GEN)); 2119 sdma_dumpstate_helper(SD(HEAD_ADDR)); 2120 sdma_dumpstate_helper(SD(CHECK_ENABLE)); 2121 sdma_dumpstate_helper(SD(CHECK_VL)); 2122 sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); 2123 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); 2124 sdma_dumpstate_helper(SD(CHECK_SLID)); 2125 sdma_dumpstate_helper(SD(CHECK_OPCODE)); 2126 } 2127 #endif 2128 2129 static void dump_sdma_state(struct sdma_engine *sde) 2130 { 2131 struct hw_sdma_desc *descqp; 2132 u64 desc[2]; 2133 u64 addr; 2134 u8 gen; 2135 u16 
len; 2136 u16 head, tail, cnt; 2137 2138 head = sde->descq_head & sde->sdma_mask; 2139 tail = sde->descq_tail & sde->sdma_mask; 2140 cnt = sdma_descq_freecnt(sde); 2141 2142 dd_dev_err(sde->dd, 2143 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", 2144 sde->this_idx, head, tail, cnt, 2145 !list_empty(&sde->flushlist)); 2146 2147 /* print info for each entry in the descriptor queue */ 2148 while (head != tail) { 2149 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2150 2151 descqp = &sde->descq[head]; 2152 desc[0] = le64_to_cpu(descqp->qw[0]); 2153 desc[1] = le64_to_cpu(descqp->qw[1]); 2154 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2155 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2156 'H' : '-'; 2157 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2158 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; 2159 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2160 & SDMA_DESC0_PHY_ADDR_MASK; 2161 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2162 & SDMA_DESC1_GENERATION_MASK; 2163 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2164 & SDMA_DESC0_BYTE_COUNT_MASK; 2165 dd_dev_err(sde->dd, 2166 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2167 head, flags, addr, gen, len); 2168 dd_dev_err(sde->dd, 2169 "\tdesc0:0x%016llx desc1 0x%016llx\n", 2170 desc[0], desc[1]); 2171 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2172 dd_dev_err(sde->dd, 2173 "\taidx: %u amode: %u alen: %u\n", 2174 (u8)((desc[1] & 2175 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2176 SDMA_DESC1_HEADER_INDEX_SHIFT), 2177 (u8)((desc[1] & 2178 SDMA_DESC1_HEADER_MODE_SMASK) >> 2179 SDMA_DESC1_HEADER_MODE_SHIFT), 2180 (u8)((desc[1] & 2181 SDMA_DESC1_HEADER_DWS_SMASK) >> 2182 SDMA_DESC1_HEADER_DWS_SHIFT)); 2183 head++; 2184 head &= sde->sdma_mask; 2185 } 2186 } 2187 2188 #define SDE_FMT \ 2189 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" 2190 /** 2191 * sdma_seqfile_dump_sde() - debugfs dump of sde 2192 * @s: seq file 2193 * @sde: send dma engine to dump 2194 * 2195 * This routine dumps the sde to the indicated seq file. 
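 * One SDE_FMT summary line is printed for the engine (state, CSR snapshot,
 * software head/tail and ring counters), followed by one line for each
 * descriptor still between head and tail, decoded with the same I/H/F/L
 * flag notation used by dump_sdma_state().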
2196 */ 2197 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) 2198 { 2199 u16 head, tail; 2200 struct hw_sdma_desc *descqp; 2201 u64 desc[2]; 2202 u64 addr; 2203 u8 gen; 2204 u16 len; 2205 2206 head = sde->descq_head & sde->sdma_mask; 2207 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 2208 seq_printf(s, SDE_FMT, sde->this_idx, 2209 sde->cpu, 2210 sdma_state_name(sde->state.current_state), 2211 (unsigned long long)read_sde_csr(sde, SD(CTRL)), 2212 (unsigned long long)read_sde_csr(sde, SD(STATUS)), 2213 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), 2214 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, 2215 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, 2216 (unsigned long long)le64_to_cpu(*sde->head_dma), 2217 (unsigned long long)read_sde_csr(sde, SD(MEMORY)), 2218 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), 2219 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), 2220 (unsigned long long)sde->last_status, 2221 (unsigned long long)sde->ahg_bits, 2222 sde->tx_tail, 2223 sde->tx_head, 2224 sde->descq_tail, 2225 sde->descq_head, 2226 !list_empty(&sde->flushlist), 2227 sde->descq_full_count, 2228 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); 2229 2230 /* print info for each entry in the descriptor queue */ 2231 while (head != tail) { 2232 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2233 2234 descqp = &sde->descq[head]; 2235 desc[0] = le64_to_cpu(descqp->qw[0]); 2236 desc[1] = le64_to_cpu(descqp->qw[1]); 2237 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2238 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2239 'H' : '-'; 2240 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2241 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; 2242 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2243 & SDMA_DESC0_PHY_ADDR_MASK; 2244 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2245 & SDMA_DESC1_GENERATION_MASK; 2246 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2247 & SDMA_DESC0_BYTE_COUNT_MASK; 2248 seq_printf(s, 2249 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2250 head, flags, addr, gen, len); 2251 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2252 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", 2253 (u8)((desc[1] & 2254 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2255 SDMA_DESC1_HEADER_INDEX_SHIFT), 2256 (u8)((desc[1] & 2257 SDMA_DESC1_HEADER_MODE_SMASK) >> 2258 SDMA_DESC1_HEADER_MODE_SHIFT)); 2259 head = (head + 1) & sde->sdma_mask; 2260 } 2261 } 2262 2263 /* 2264 * add the generation number into 2265 * the qw1 and return 2266 */ 2267 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) 2268 { 2269 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; 2270 2271 qw1 &= ~SDMA_DESC1_GENERATION_SMASK; 2272 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) 2273 << SDMA_DESC1_GENERATION_SHIFT; 2274 return qw1; 2275 } 2276 2277 /* 2278 * This routine submits the indicated tx 2279 * 2280 * Space has already been guaranteed and 2281 * tail side of ring is locked. 2282 * 2283 * The hardware tail update is done 2284 * in the caller and that is facilitated 2285 * by returning the new tail. 2286 * 2287 * There is special case logic for ahg 2288 * to not add the generation number for 2289 * up to 2 descriptors that follow the 2290 * first descriptor. 
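 *
 * As a worked example of the generation arithmetic in add_gen(): assuming
 * sdma_shift is ilog2 of the ring size, the default 2048-entry ring gives
 * sdma_shift == 11, so descq_tail values 0..2047 are stamped with
 * generation 0, 2048..4095 with generation 1, and so on, the generation
 * wrapping modulo 4 as the 16-bit tail counter keeps incrementing.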
2291 * 2292 */ 2293 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) 2294 { 2295 int i; 2296 u16 tail; 2297 struct sdma_desc *descp = tx->descp; 2298 u8 skip = 0, mode = ahg_mode(tx); 2299 2300 tail = sde->descq_tail & sde->sdma_mask; 2301 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2302 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); 2303 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], 2304 tail, &sde->descq[tail]); 2305 tail = ++sde->descq_tail & sde->sdma_mask; 2306 descp++; 2307 if (mode > SDMA_AHG_APPLY_UPDATE1) 2308 skip = mode >> 1; 2309 for (i = 1; i < tx->num_desc; i++, descp++) { 2310 u64 qw1; 2311 2312 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2313 if (skip) { 2314 /* edits don't have generation */ 2315 qw1 = descp->qw[1]; 2316 skip--; 2317 } else { 2318 /* replace generation with real one for non-edits */ 2319 qw1 = add_gen(sde, descp->qw[1]); 2320 } 2321 sde->descq[tail].qw[1] = cpu_to_le64(qw1); 2322 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, 2323 tail, &sde->descq[tail]); 2324 tail = ++sde->descq_tail & sde->sdma_mask; 2325 } 2326 tx->next_descq_idx = tail; 2327 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2328 tx->sn = sde->tail_sn++; 2329 trace_hfi1_sdma_in_sn(sde, tx->sn); 2330 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); 2331 #endif 2332 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; 2333 sde->desc_avail -= tx->num_desc; 2334 return tail; 2335 } 2336 2337 /* 2338 * Check for progress 2339 */ 2340 static int sdma_check_progress( 2341 struct sdma_engine *sde, 2342 struct iowait_work *wait, 2343 struct sdma_txreq *tx, 2344 bool pkts_sent) 2345 { 2346 int ret; 2347 2348 sde->desc_avail = sdma_descq_freecnt(sde); 2349 if (tx->num_desc <= sde->desc_avail) 2350 return -EAGAIN; 2351 /* pulse the head_lock */ 2352 if (wait && iowait_ioww_to_iow(wait)->sleep) { 2353 unsigned seq; 2354 2355 seq = raw_seqcount_begin( 2356 (const seqcount_t *)&sde->head_lock.seqcount); 2357 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); 2358 if (ret == -EAGAIN) 2359 sde->desc_avail = sdma_descq_freecnt(sde); 2360 } else { 2361 ret = -EBUSY; 2362 } 2363 return ret; 2364 } 2365 2366 /** 2367 * sdma_send_txreq() - submit a tx req to ring 2368 * @sde: sdma engine to use 2369 * @wait: SE wait structure to use when full (may be NULL) 2370 * @tx: sdma_txreq to submit 2371 * @pkts_sent: has any packet been sent yet? 2372 * 2373 * The call submits the tx into the ring. If an iowait structure is non-NULL 2374 * the packet will be queued to the list in wait.
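 *
 * A minimal caller sketch (illustrative only: 'sde', 'wait' and 'tx' are set
 * up elsewhere with the engine-selection and txreq helpers, and
 * 'handle_error' is a placeholder):
 *
 *	ret = sdma_send_txreq(sde, wait, tx, pkts_sent);
 *	if (ret == -EIOCBQUEUED)
 *		return;			/* queued on the iowait; woken when descriptors free up */
 *	if (ret)
 *		handle_error(ret);	/* -EINVAL, -EBUSY or -ECOMM */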
2375 * 2376 * Return: 2377 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in 2378 * ring (wait == NULL) 2379 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2380 */ 2381 int sdma_send_txreq(struct sdma_engine *sde, 2382 struct iowait_work *wait, 2383 struct sdma_txreq *tx, 2384 bool pkts_sent) 2385 { 2386 int ret = 0; 2387 u16 tail; 2388 unsigned long flags; 2389 2390 /* user should have supplied entire packet */ 2391 if (unlikely(tx->tlen)) 2392 return -EINVAL; 2393 tx->wait = iowait_ioww_to_iow(wait); 2394 spin_lock_irqsave(&sde->tail_lock, flags); 2395 retry: 2396 if (unlikely(!__sdma_running(sde))) 2397 goto unlock_noconn; 2398 if (unlikely(tx->num_desc > sde->desc_avail)) 2399 goto nodesc; 2400 tail = submit_tx(sde, tx); 2401 if (wait) 2402 iowait_sdma_inc(iowait_ioww_to_iow(wait)); 2403 sdma_update_tail(sde, tail); 2404 unlock: 2405 spin_unlock_irqrestore(&sde->tail_lock, flags); 2406 return ret; 2407 unlock_noconn: 2408 if (wait) 2409 iowait_sdma_inc(iowait_ioww_to_iow(wait)); 2410 tx->next_descq_idx = 0; 2411 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2412 tx->sn = sde->tail_sn++; 2413 trace_hfi1_sdma_in_sn(sde, tx->sn); 2414 #endif 2415 spin_lock(&sde->flushlist_lock); 2416 list_add_tail(&tx->list, &sde->flushlist); 2417 spin_unlock(&sde->flushlist_lock); 2418 iowait_inc_wait_count(wait, tx->num_desc); 2419 schedule_work(&sde->flush_worker); 2420 ret = -ECOMM; 2421 goto unlock; 2422 nodesc: 2423 ret = sdma_check_progress(sde, wait, tx, pkts_sent); 2424 if (ret == -EAGAIN) { 2425 ret = 0; 2426 goto retry; 2427 } 2428 sde->descq_full_count++; 2429 goto unlock; 2430 } 2431 2432 /** 2433 * sdma_send_txlist() - submit a list of tx req to ring 2434 * @sde: sdma engine to use 2435 * @wait: SE wait structure to use when full (may be NULL) 2436 * @tx_list: list of sdma_txreqs to submit 2437 * @count_out: pointer to a u16 which, after return, will contain the total number of 2438 * sdma_txreqs removed from the tx_list. This will include sdma_txreqs 2439 * whose SDMA descriptors are submitted to the ring and the sdma_txreqs 2440 * which are added to the SDMA engine flush list if the SDMA engine state is 2441 * not running. 2442 * 2443 * The call submits the list into the ring. 2444 * 2445 * If the iowait structure is non-NULL and not equal to the iowait list, 2446 * the unprocessed part of the list will be appended to the list in wait. 2447 * 2448 * In all cases, the tx_list will be updated so the head of the tx_list is 2449 * the list of descriptors that have yet to be transmitted. 2450 * 2451 * The intent of this call is to provide a more efficient 2452 * way of submitting multiple packets to SDMA while holding the tail 2453 * side lock.
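 *
 * To keep that lock hold short, the hardware tail CSR is not written for
 * every txreq: inside the loop sdma_update_tail() only runs when
 * (submit_count & SDMA_TAIL_UPDATE_THRESH) is zero, and any remainder is
 * flushed with one final tail write before the routine returns, amortizing
 * the MMIO cost across the batch.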
2454 * 2455 * Return: 2456 * 0 - Success, 2457 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) 2458 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2459 */ 2460 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, 2461 struct list_head *tx_list, u16 *count_out) 2462 { 2463 struct sdma_txreq *tx, *tx_next; 2464 int ret = 0; 2465 unsigned long flags; 2466 u16 tail = INVALID_TAIL; 2467 u32 submit_count = 0, flush_count = 0, total_count; 2468 2469 spin_lock_irqsave(&sde->tail_lock, flags); 2470 retry: 2471 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2472 tx->wait = iowait_ioww_to_iow(wait); 2473 if (unlikely(!__sdma_running(sde))) 2474 goto unlock_noconn; 2475 if (unlikely(tx->num_desc > sde->desc_avail)) 2476 goto nodesc; 2477 if (unlikely(tx->tlen)) { 2478 ret = -EINVAL; 2479 goto update_tail; 2480 } 2481 list_del_init(&tx->list); 2482 tail = submit_tx(sde, tx); 2483 submit_count++; 2484 if (tail != INVALID_TAIL && 2485 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { 2486 sdma_update_tail(sde, tail); 2487 tail = INVALID_TAIL; 2488 } 2489 } 2490 update_tail: 2491 total_count = submit_count + flush_count; 2492 if (wait) { 2493 iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); 2494 iowait_starve_clear(submit_count > 0, 2495 iowait_ioww_to_iow(wait)); 2496 } 2497 if (tail != INVALID_TAIL) 2498 sdma_update_tail(sde, tail); 2499 spin_unlock_irqrestore(&sde->tail_lock, flags); 2500 *count_out = total_count; 2501 return ret; 2502 unlock_noconn: 2503 spin_lock(&sde->flushlist_lock); 2504 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2505 tx->wait = iowait_ioww_to_iow(wait); 2506 list_del_init(&tx->list); 2507 tx->next_descq_idx = 0; 2508 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2509 tx->sn = sde->tail_sn++; 2510 trace_hfi1_sdma_in_sn(sde, tx->sn); 2511 #endif 2512 list_add_tail(&tx->list, &sde->flushlist); 2513 flush_count++; 2514 iowait_inc_wait_count(wait, tx->num_desc); 2515 } 2516 spin_unlock(&sde->flushlist_lock); 2517 schedule_work(&sde->flush_worker); 2518 ret = -ECOMM; 2519 goto update_tail; 2520 nodesc: 2521 ret = sdma_check_progress(sde, wait, tx, submit_count > 0); 2522 if (ret == -EAGAIN) { 2523 ret = 0; 2524 goto retry; 2525 } 2526 sde->descq_full_count++; 2527 goto update_tail; 2528 } 2529 2530 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) 2531 { 2532 unsigned long flags; 2533 2534 spin_lock_irqsave(&sde->tail_lock, flags); 2535 write_seqlock(&sde->head_lock); 2536 2537 __sdma_process_event(sde, event); 2538 2539 if (sde->state.current_state == sdma_state_s99_running) 2540 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 2541 2542 write_sequnlock(&sde->head_lock); 2543 spin_unlock_irqrestore(&sde->tail_lock, flags); 2544 } 2545 2546 static void __sdma_process_event(struct sdma_engine *sde, 2547 enum sdma_events event) 2548 { 2549 struct sdma_state *ss = &sde->state; 2550 int need_progress = 0; 2551 2552 /* CONFIG SDMA temporary */ 2553 #ifdef CONFIG_SDMA_VERBOSITY 2554 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, 2555 sdma_state_names[ss->current_state], 2556 sdma_event_names[event]); 2557 #endif 2558 2559 switch (ss->current_state) { 2560 case sdma_state_s00_hw_down: 2561 switch (event) { 2562 case sdma_event_e00_go_hw_down: 2563 break; 2564 case sdma_event_e30_go_running: 2565 /* 2566 * If down, but running requested (usually result 2567 * of link up), then we need to start up.
2568 * This can happen when hw down is requested while 2569 * bringing the link up with traffic active on 2570 * 7220, e.g. 2571 */ 2572 ss->go_s99_running = 1; 2573 /* fall through -- and start dma engine */ 2574 case sdma_event_e10_go_hw_start: 2575 /* This reference means the state machine is started */ 2576 sdma_get(&sde->state); 2577 sdma_set_state(sde, 2578 sdma_state_s10_hw_start_up_halt_wait); 2579 break; 2580 case sdma_event_e15_hw_halt_done: 2581 break; 2582 case sdma_event_e25_hw_clean_up_done: 2583 break; 2584 case sdma_event_e40_sw_cleaned: 2585 sdma_sw_tear_down(sde); 2586 break; 2587 case sdma_event_e50_hw_cleaned: 2588 break; 2589 case sdma_event_e60_hw_halted: 2590 break; 2591 case sdma_event_e70_go_idle: 2592 break; 2593 case sdma_event_e80_hw_freeze: 2594 break; 2595 case sdma_event_e81_hw_frozen: 2596 break; 2597 case sdma_event_e82_hw_unfreeze: 2598 break; 2599 case sdma_event_e85_link_down: 2600 break; 2601 case sdma_event_e90_sw_halted: 2602 break; 2603 } 2604 break; 2605 2606 case sdma_state_s10_hw_start_up_halt_wait: 2607 switch (event) { 2608 case sdma_event_e00_go_hw_down: 2609 sdma_set_state(sde, sdma_state_s00_hw_down); 2610 sdma_sw_tear_down(sde); 2611 break; 2612 case sdma_event_e10_go_hw_start: 2613 break; 2614 case sdma_event_e15_hw_halt_done: 2615 sdma_set_state(sde, 2616 sdma_state_s15_hw_start_up_clean_wait); 2617 sdma_start_hw_clean_up(sde); 2618 break; 2619 case sdma_event_e25_hw_clean_up_done: 2620 break; 2621 case sdma_event_e30_go_running: 2622 ss->go_s99_running = 1; 2623 break; 2624 case sdma_event_e40_sw_cleaned: 2625 break; 2626 case sdma_event_e50_hw_cleaned: 2627 break; 2628 case sdma_event_e60_hw_halted: 2629 schedule_work(&sde->err_halt_worker); 2630 break; 2631 case sdma_event_e70_go_idle: 2632 ss->go_s99_running = 0; 2633 break; 2634 case sdma_event_e80_hw_freeze: 2635 break; 2636 case sdma_event_e81_hw_frozen: 2637 break; 2638 case sdma_event_e82_hw_unfreeze: 2639 break; 2640 case sdma_event_e85_link_down: 2641 break; 2642 case sdma_event_e90_sw_halted: 2643 break; 2644 } 2645 break; 2646 2647 case sdma_state_s15_hw_start_up_clean_wait: 2648 switch (event) { 2649 case sdma_event_e00_go_hw_down: 2650 sdma_set_state(sde, sdma_state_s00_hw_down); 2651 sdma_sw_tear_down(sde); 2652 break; 2653 case sdma_event_e10_go_hw_start: 2654 break; 2655 case sdma_event_e15_hw_halt_done: 2656 break; 2657 case sdma_event_e25_hw_clean_up_done: 2658 sdma_hw_start_up(sde); 2659 sdma_set_state(sde, ss->go_s99_running ? 
2660 sdma_state_s99_running : 2661 sdma_state_s20_idle); 2662 break; 2663 case sdma_event_e30_go_running: 2664 ss->go_s99_running = 1; 2665 break; 2666 case sdma_event_e40_sw_cleaned: 2667 break; 2668 case sdma_event_e50_hw_cleaned: 2669 break; 2670 case sdma_event_e60_hw_halted: 2671 break; 2672 case sdma_event_e70_go_idle: 2673 ss->go_s99_running = 0; 2674 break; 2675 case sdma_event_e80_hw_freeze: 2676 break; 2677 case sdma_event_e81_hw_frozen: 2678 break; 2679 case sdma_event_e82_hw_unfreeze: 2680 break; 2681 case sdma_event_e85_link_down: 2682 break; 2683 case sdma_event_e90_sw_halted: 2684 break; 2685 } 2686 break; 2687 2688 case sdma_state_s20_idle: 2689 switch (event) { 2690 case sdma_event_e00_go_hw_down: 2691 sdma_set_state(sde, sdma_state_s00_hw_down); 2692 sdma_sw_tear_down(sde); 2693 break; 2694 case sdma_event_e10_go_hw_start: 2695 break; 2696 case sdma_event_e15_hw_halt_done: 2697 break; 2698 case sdma_event_e25_hw_clean_up_done: 2699 break; 2700 case sdma_event_e30_go_running: 2701 sdma_set_state(sde, sdma_state_s99_running); 2702 ss->go_s99_running = 1; 2703 break; 2704 case sdma_event_e40_sw_cleaned: 2705 break; 2706 case sdma_event_e50_hw_cleaned: 2707 break; 2708 case sdma_event_e60_hw_halted: 2709 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 2710 schedule_work(&sde->err_halt_worker); 2711 break; 2712 case sdma_event_e70_go_idle: 2713 break; 2714 case sdma_event_e85_link_down: 2715 /* fall through */ 2716 case sdma_event_e80_hw_freeze: 2717 sdma_set_state(sde, sdma_state_s80_hw_freeze); 2718 atomic_dec(&sde->dd->sdma_unfreeze_count); 2719 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2720 break; 2721 case sdma_event_e81_hw_frozen: 2722 break; 2723 case sdma_event_e82_hw_unfreeze: 2724 break; 2725 case sdma_event_e90_sw_halted: 2726 break; 2727 } 2728 break; 2729 2730 case sdma_state_s30_sw_clean_up_wait: 2731 switch (event) { 2732 case sdma_event_e00_go_hw_down: 2733 sdma_set_state(sde, sdma_state_s00_hw_down); 2734 break; 2735 case sdma_event_e10_go_hw_start: 2736 break; 2737 case sdma_event_e15_hw_halt_done: 2738 break; 2739 case sdma_event_e25_hw_clean_up_done: 2740 break; 2741 case sdma_event_e30_go_running: 2742 ss->go_s99_running = 1; 2743 break; 2744 case sdma_event_e40_sw_cleaned: 2745 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); 2746 sdma_start_hw_clean_up(sde); 2747 break; 2748 case sdma_event_e50_hw_cleaned: 2749 break; 2750 case sdma_event_e60_hw_halted: 2751 break; 2752 case sdma_event_e70_go_idle: 2753 ss->go_s99_running = 0; 2754 break; 2755 case sdma_event_e80_hw_freeze: 2756 break; 2757 case sdma_event_e81_hw_frozen: 2758 break; 2759 case sdma_event_e82_hw_unfreeze: 2760 break; 2761 case sdma_event_e85_link_down: 2762 ss->go_s99_running = 0; 2763 break; 2764 case sdma_event_e90_sw_halted: 2765 break; 2766 } 2767 break; 2768 2769 case sdma_state_s40_hw_clean_up_wait: 2770 switch (event) { 2771 case sdma_event_e00_go_hw_down: 2772 sdma_set_state(sde, sdma_state_s00_hw_down); 2773 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2774 break; 2775 case sdma_event_e10_go_hw_start: 2776 break; 2777 case sdma_event_e15_hw_halt_done: 2778 break; 2779 case sdma_event_e25_hw_clean_up_done: 2780 sdma_hw_start_up(sde); 2781 sdma_set_state(sde, ss->go_s99_running ? 
2782 sdma_state_s99_running : 2783 sdma_state_s20_idle); 2784 break; 2785 case sdma_event_e30_go_running: 2786 ss->go_s99_running = 1; 2787 break; 2788 case sdma_event_e40_sw_cleaned: 2789 break; 2790 case sdma_event_e50_hw_cleaned: 2791 break; 2792 case sdma_event_e60_hw_halted: 2793 break; 2794 case sdma_event_e70_go_idle: 2795 ss->go_s99_running = 0; 2796 break; 2797 case sdma_event_e80_hw_freeze: 2798 break; 2799 case sdma_event_e81_hw_frozen: 2800 break; 2801 case sdma_event_e82_hw_unfreeze: 2802 break; 2803 case sdma_event_e85_link_down: 2804 ss->go_s99_running = 0; 2805 break; 2806 case sdma_event_e90_sw_halted: 2807 break; 2808 } 2809 break; 2810 2811 case sdma_state_s50_hw_halt_wait: 2812 switch (event) { 2813 case sdma_event_e00_go_hw_down: 2814 sdma_set_state(sde, sdma_state_s00_hw_down); 2815 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2816 break; 2817 case sdma_event_e10_go_hw_start: 2818 break; 2819 case sdma_event_e15_hw_halt_done: 2820 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2821 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2822 break; 2823 case sdma_event_e25_hw_clean_up_done: 2824 break; 2825 case sdma_event_e30_go_running: 2826 ss->go_s99_running = 1; 2827 break; 2828 case sdma_event_e40_sw_cleaned: 2829 break; 2830 case sdma_event_e50_hw_cleaned: 2831 break; 2832 case sdma_event_e60_hw_halted: 2833 schedule_work(&sde->err_halt_worker); 2834 break; 2835 case sdma_event_e70_go_idle: 2836 ss->go_s99_running = 0; 2837 break; 2838 case sdma_event_e80_hw_freeze: 2839 break; 2840 case sdma_event_e81_hw_frozen: 2841 break; 2842 case sdma_event_e82_hw_unfreeze: 2843 break; 2844 case sdma_event_e85_link_down: 2845 ss->go_s99_running = 0; 2846 break; 2847 case sdma_event_e90_sw_halted: 2848 break; 2849 } 2850 break; 2851 2852 case sdma_state_s60_idle_halt_wait: 2853 switch (event) { 2854 case sdma_event_e00_go_hw_down: 2855 sdma_set_state(sde, sdma_state_s00_hw_down); 2856 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2857 break; 2858 case sdma_event_e10_go_hw_start: 2859 break; 2860 case sdma_event_e15_hw_halt_done: 2861 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2862 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2863 break; 2864 case sdma_event_e25_hw_clean_up_done: 2865 break; 2866 case sdma_event_e30_go_running: 2867 ss->go_s99_running = 1; 2868 break; 2869 case sdma_event_e40_sw_cleaned: 2870 break; 2871 case sdma_event_e50_hw_cleaned: 2872 break; 2873 case sdma_event_e60_hw_halted: 2874 schedule_work(&sde->err_halt_worker); 2875 break; 2876 case sdma_event_e70_go_idle: 2877 ss->go_s99_running = 0; 2878 break; 2879 case sdma_event_e80_hw_freeze: 2880 break; 2881 case sdma_event_e81_hw_frozen: 2882 break; 2883 case sdma_event_e82_hw_unfreeze: 2884 break; 2885 case sdma_event_e85_link_down: 2886 break; 2887 case sdma_event_e90_sw_halted: 2888 break; 2889 } 2890 break; 2891 2892 case sdma_state_s80_hw_freeze: 2893 switch (event) { 2894 case sdma_event_e00_go_hw_down: 2895 sdma_set_state(sde, sdma_state_s00_hw_down); 2896 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2897 break; 2898 case sdma_event_e10_go_hw_start: 2899 break; 2900 case sdma_event_e15_hw_halt_done: 2901 break; 2902 case sdma_event_e25_hw_clean_up_done: 2903 break; 2904 case sdma_event_e30_go_running: 2905 ss->go_s99_running = 1; 2906 break; 2907 case sdma_event_e40_sw_cleaned: 2908 break; 2909 case sdma_event_e50_hw_cleaned: 2910 break; 2911 case sdma_event_e60_hw_halted: 2912 break; 2913 case sdma_event_e70_go_idle: 2914 ss->go_s99_running = 0; 2915 break; 2916 
case sdma_event_e80_hw_freeze: 2917 break; 2918 case sdma_event_e81_hw_frozen: 2919 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); 2920 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2921 break; 2922 case sdma_event_e82_hw_unfreeze: 2923 break; 2924 case sdma_event_e85_link_down: 2925 break; 2926 case sdma_event_e90_sw_halted: 2927 break; 2928 } 2929 break; 2930 2931 case sdma_state_s82_freeze_sw_clean: 2932 switch (event) { 2933 case sdma_event_e00_go_hw_down: 2934 sdma_set_state(sde, sdma_state_s00_hw_down); 2935 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2936 break; 2937 case sdma_event_e10_go_hw_start: 2938 break; 2939 case sdma_event_e15_hw_halt_done: 2940 break; 2941 case sdma_event_e25_hw_clean_up_done: 2942 break; 2943 case sdma_event_e30_go_running: 2944 ss->go_s99_running = 1; 2945 break; 2946 case sdma_event_e40_sw_cleaned: 2947 /* notify caller this engine is done cleaning */ 2948 atomic_dec(&sde->dd->sdma_unfreeze_count); 2949 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2950 break; 2951 case sdma_event_e50_hw_cleaned: 2952 break; 2953 case sdma_event_e60_hw_halted: 2954 break; 2955 case sdma_event_e70_go_idle: 2956 ss->go_s99_running = 0; 2957 break; 2958 case sdma_event_e80_hw_freeze: 2959 break; 2960 case sdma_event_e81_hw_frozen: 2961 break; 2962 case sdma_event_e82_hw_unfreeze: 2963 sdma_hw_start_up(sde); 2964 sdma_set_state(sde, ss->go_s99_running ? 2965 sdma_state_s99_running : 2966 sdma_state_s20_idle); 2967 break; 2968 case sdma_event_e85_link_down: 2969 break; 2970 case sdma_event_e90_sw_halted: 2971 break; 2972 } 2973 break; 2974 2975 case sdma_state_s99_running: 2976 switch (event) { 2977 case sdma_event_e00_go_hw_down: 2978 sdma_set_state(sde, sdma_state_s00_hw_down); 2979 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2980 break; 2981 case sdma_event_e10_go_hw_start: 2982 break; 2983 case sdma_event_e15_hw_halt_done: 2984 break; 2985 case sdma_event_e25_hw_clean_up_done: 2986 break; 2987 case sdma_event_e30_go_running: 2988 break; 2989 case sdma_event_e40_sw_cleaned: 2990 break; 2991 case sdma_event_e50_hw_cleaned: 2992 break; 2993 case sdma_event_e60_hw_halted: 2994 need_progress = 1; 2995 sdma_err_progress_check_schedule(sde); 2996 /* fall through */ 2997 case sdma_event_e90_sw_halted: 2998 /* 2999 * SW initiated halt does not perform engines 3000 * progress check 3001 */ 3002 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 3003 schedule_work(&sde->err_halt_worker); 3004 break; 3005 case sdma_event_e70_go_idle: 3006 sdma_set_state(sde, sdma_state_s60_idle_halt_wait); 3007 break; 3008 case sdma_event_e85_link_down: 3009 ss->go_s99_running = 0; 3010 /* fall through */ 3011 case sdma_event_e80_hw_freeze: 3012 sdma_set_state(sde, sdma_state_s80_hw_freeze); 3013 atomic_dec(&sde->dd->sdma_unfreeze_count); 3014 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 3015 break; 3016 case sdma_event_e81_hw_frozen: 3017 break; 3018 case sdma_event_e82_hw_unfreeze: 3019 break; 3020 } 3021 break; 3022 } 3023 3024 ss->last_event = event; 3025 if (need_progress) 3026 sdma_make_progress(sde, 0); 3027 } 3028 3029 /* 3030 * _extend_sdma_tx_descs() - helper to extend txreq 3031 * 3032 * This is called once the initial nominal allocation 3033 * of descriptors in the sdma_txreq is exhausted. 3034 * 3035 * The code will bump the allocation up to the max 3036 * of MAX_DESC (64) descriptors. There doesn't seem 3037 * much point in an interim step. 
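(The expanded descriptor array, like the coalesce buffer, is allocated with GFP_ATOMIC because a txreq may be extended from a context that cannot sleep.)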
The last descriptor 3038 * is reserved for coalesce buffer in order to support 3039 * cases where input packet has >MAX_DESC iovecs. 3040 * 3041 */ 3042 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3043 { 3044 int i; 3045 3046 /* Handle last descriptor */ 3047 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { 3048 /* if tlen is 0, it is for padding, release last descriptor */ 3049 if (!tx->tlen) { 3050 tx->desc_limit = MAX_DESC; 3051 } else if (!tx->coalesce_buf) { 3052 /* allocate coalesce buffer with space for padding */ 3053 tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), 3054 GFP_ATOMIC); 3055 if (!tx->coalesce_buf) 3056 goto enomem; 3057 tx->coalesce_idx = 0; 3058 } 3059 return 0; 3060 } 3061 3062 if (unlikely(tx->num_desc == MAX_DESC)) 3063 goto enomem; 3064 3065 tx->descp = kmalloc_array( 3066 MAX_DESC, 3067 sizeof(struct sdma_desc), 3068 GFP_ATOMIC); 3069 if (!tx->descp) 3070 goto enomem; 3071 3072 /* reserve last descriptor for coalescing */ 3073 tx->desc_limit = MAX_DESC - 1; 3074 /* copy ones already built */ 3075 for (i = 0; i < tx->num_desc; i++) 3076 tx->descp[i] = tx->descs[i]; 3077 return 0; 3078 enomem: 3079 __sdma_txclean(dd, tx); 3080 return -ENOMEM; 3081 } 3082 3083 /* 3084 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors 3085 * 3086 * This is called once the initial nominal allocation of descriptors 3087 * in the sdma_txreq is exhausted. 3088 * 3089 * This function calls _extend_sdma_tx_descs to extend or allocate a 3090 * coalesce buffer. If there is an allocated coalesce buffer, it will 3091 * copy the input packet data into the coalesce buffer. It also adds the 3092 * coalesce buffer descriptor once the whole packet is received. 3093 * 3094 * Return: 3095 * <0 - error 3096 * 0 - coalescing, don't populate descriptor 3097 * 1 - continue with populating descriptor 3098 */ 3099 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, 3100 int type, void *kvaddr, struct page *page, 3101 unsigned long offset, u16 len) 3102 { 3103 int pad_len, rval; 3104 dma_addr_t addr; 3105 3106 rval = _extend_sdma_tx_descs(dd, tx); 3107 if (rval) { 3108 __sdma_txclean(dd, tx); 3109 return rval; 3110 } 3111 3112 /* If coalesce buffer is allocated, copy data into it */ 3113 if (tx->coalesce_buf) { 3114 if (type == SDMA_MAP_NONE) { 3115 __sdma_txclean(dd, tx); 3116 return -EINVAL; 3117 } 3118 3119 if (type == SDMA_MAP_PAGE) { 3120 kvaddr = kmap(page); 3121 kvaddr += offset; 3122 } else if (WARN_ON(!kvaddr)) { 3123 __sdma_txclean(dd, tx); 3124 return -EINVAL; 3125 } 3126 3127 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); 3128 tx->coalesce_idx += len; 3129 if (type == SDMA_MAP_PAGE) 3130 kunmap(page); 3131 3132 /* If there is more data, return */ 3133 if (tx->tlen - tx->coalesce_idx) 3134 return 0; 3135 3136 /* Whole packet is received; add any padding */ 3137 pad_len = tx->packet_len & (sizeof(u32) - 1); 3138 if (pad_len) { 3139 pad_len = sizeof(u32) - pad_len; 3140 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); 3141 /* padding is taken care of for coalescing case */ 3142 tx->packet_len += pad_len; 3143 tx->tlen += pad_len; 3144 } 3145 3146 /* dma map the coalesce buffer */ 3147 addr = dma_map_single(&dd->pcidev->dev, 3148 tx->coalesce_buf, 3149 tx->tlen, 3150 DMA_TO_DEVICE); 3151 3152 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { 3153 __sdma_txclean(dd, tx); 3154 return -ENOSPC; 3155 } 3156 3157 /* Add descriptor for coalesce buffer */ 3158 tx->desc_limit = MAX_DESC; 3159 return
_sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, 3160 addr, tx->tlen); 3161 } 3162 3163 return 1; 3164 } 3165 3166 /* Update sdes when the lmc changes */ 3167 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) 3168 { 3169 struct sdma_engine *sde; 3170 int i; 3171 u64 sreg; 3172 3173 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << 3174 SD(CHECK_SLID_MASK_SHIFT)) | 3175 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << 3176 SD(CHECK_SLID_VALUE_SHIFT)); 3177 3178 for (i = 0; i < dd->num_sdma; i++) { 3179 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", 3180 i, (u32)sreg); 3181 sde = &dd->per_sdma[i]; 3182 write_sde_csr(sde, SD(CHECK_SLID), sreg); 3183 } 3184 } 3185 3186 /* tx not dword sized - pad */ 3187 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3188 { 3189 int rval = 0; 3190 3191 tx->num_desc++; 3192 if ((unlikely(tx->num_desc == tx->desc_limit))) { 3193 rval = _extend_sdma_tx_descs(dd, tx); 3194 if (rval) { 3195 __sdma_txclean(dd, tx); 3196 return rval; 3197 } 3198 } 3199 /* finish the one just added */ 3200 make_tx_sdma_desc( 3201 tx, 3202 SDMA_MAP_NONE, 3203 dd->sdma_pad_phys, 3204 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); 3205 _sdma_close_tx(dd, tx); 3206 return rval; 3207 } 3208 3209 /* 3210 * Add ahg to the sdma_txreq 3211 * 3212 * The logic will consume up to 3 3213 * descriptors at the beginning of 3214 * sdma_txreq. 3215 */ 3216 void _sdma_txreq_ahgadd( 3217 struct sdma_txreq *tx, 3218 u8 num_ahg, 3219 u8 ahg_entry, 3220 u32 *ahg, 3221 u8 ahg_hlen) 3222 { 3223 u32 i, shift = 0, desc = 0; 3224 u8 mode; 3225 3226 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); 3227 /* compute mode */ 3228 if (num_ahg == 1) 3229 mode = SDMA_AHG_APPLY_UPDATE1; 3230 else if (num_ahg <= 5) 3231 mode = SDMA_AHG_APPLY_UPDATE2; 3232 else 3233 mode = SDMA_AHG_APPLY_UPDATE3; 3234 tx->num_desc++; 3235 /* initialize the consumed descriptors to zero */ 3236 switch (mode) { 3237 case SDMA_AHG_APPLY_UPDATE3: 3238 tx->num_desc++; 3239 tx->descs[2].qw[0] = 0; 3240 tx->descs[2].qw[1] = 0; 3241 /* FALLTHROUGH */ 3242 case SDMA_AHG_APPLY_UPDATE2: 3243 tx->num_desc++; 3244 tx->descs[1].qw[0] = 0; 3245 tx->descs[1].qw[1] = 0; 3246 break; 3247 } 3248 ahg_hlen >>= 2; 3249 tx->descs[0].qw[1] |= 3250 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) 3251 << SDMA_DESC1_HEADER_INDEX_SHIFT) | 3252 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) 3253 << SDMA_DESC1_HEADER_DWS_SHIFT) | 3254 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) 3255 << SDMA_DESC1_HEADER_MODE_SHIFT) | 3256 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) 3257 << SDMA_DESC1_HEADER_UPDATE1_SHIFT); 3258 for (i = 0; i < (num_ahg - 1); i++) { 3259 if (!shift && !(i & 2)) 3260 desc++; 3261 tx->descs[desc].qw[!!(i & 2)] |= 3262 (((u64)ahg[i + 1]) 3263 << shift); 3264 shift = (shift + 32) & 63; 3265 } 3266 } 3267 3268 /** 3269 * sdma_ahg_alloc - allocate an AHG entry 3270 * @sde: engine to allocate from 3271 * 3272 * Return: 3273 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled, 3274 * -ENOSPC if an entry is not available 3275 */ 3276 int sdma_ahg_alloc(struct sdma_engine *sde) 3277 { 3278 int nr; 3279 int oldbit; 3280 3281 if (!sde) { 3282 trace_hfi1_ahg_allocate(sde, -EINVAL); 3283 return -EINVAL; 3284 } 3285 while (1) { 3286 nr = ffz(READ_ONCE(sde->ahg_bits)); 3287 if (nr > 31) { 3288 trace_hfi1_ahg_allocate(sde, -ENOSPC); 3289 return -ENOSPC; 3290 } 3291 oldbit = test_and_set_bit(nr, &sde->ahg_bits); 3292 if (!oldbit) 3293 break; 3294 cpu_relax(); 3295 } 3296 trace_hfi1_ahg_allocate(sde, nr);
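	/* bit 'nr' now belongs to the caller until sdma_ahg_free() releases it */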
3297 return nr; 3298 } 3299 3300 /** 3301 * sdma_ahg_free - free an AHG entry 3302 * @sde: engine to return AHG entry 3303 * @ahg_index: index to free 3304 * 3305 * This routine frees the indicated AHG entry. 3306 */ 3307 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) 3308 { 3309 if (!sde) 3310 return; 3311 trace_hfi1_ahg_deallocate(sde, ahg_index); 3312 if (ahg_index < 0 || ahg_index > 31) 3313 return; 3314 clear_bit(ahg_index, &sde->ahg_bits); 3315 } 3316 3317 /* 3318 * SPC freeze handling for SDMA engines. Called when the driver knows 3319 * the SPC is going into a freeze but before the freeze is fully 3320 * settled. Generally an error interrupt. 3321 * 3322 * This event will pull the engine out of running so no more entries can be 3323 * added to the engine's queue. 3324 */ 3325 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) 3326 { 3327 int i; 3328 enum sdma_events event = link_down ? sdma_event_e85_link_down : 3329 sdma_event_e80_hw_freeze; 3330 3331 /* set up the wait but do not wait here */ 3332 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3333 3334 /* tell all engines to stop running and wait */ 3335 for (i = 0; i < dd->num_sdma; i++) 3336 sdma_process_event(&dd->per_sdma[i], event); 3337 3338 /* sdma_freeze() will wait for all engines to have stopped */ 3339 } 3340 3341 /* 3342 * SPC freeze handling for SDMA engines. Called when the driver knows 3343 * the SPC is fully frozen. 3344 */ 3345 void sdma_freeze(struct hfi1_devdata *dd) 3346 { 3347 int i; 3348 int ret; 3349 3350 /* 3351 * Make sure all engines have moved out of the running state before 3352 * continuing. 3353 */ 3354 ret = wait_event_interruptible(dd->sdma_unfreeze_wq, 3355 atomic_read(&dd->sdma_unfreeze_count) <= 3356 0); 3357 /* interrupted or count is negative, then unloading - just exit */ 3358 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) 3359 return; 3360 3361 /* set up the count for the next wait */ 3362 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3363 3364 /* tell all engines that the SPC is frozen, they can start cleaning */ 3365 for (i = 0; i < dd->num_sdma; i++) 3366 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); 3367 3368 /* 3369 * Wait for everyone to finish software clean before exiting. The 3370 * software clean will read engine CSRs, so must be completed before 3371 * the next step, which will clear the engine CSRs. 3372 */ 3373 (void)wait_event_interruptible(dd->sdma_unfreeze_wq, 3374 atomic_read(&dd->sdma_unfreeze_count) <= 0); 3375 /* no need to check results - done no matter what */ 3376 } 3377 3378 /* 3379 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. 3380 * 3381 * The SPC freeze acts like a SDMA halt and a hardware clean combined. All 3382 * that is left is a software clean. We could do it after the SPC is fully 3383 * frozen, but then we'd have to add another state to wait for the unfreeze. 3384 * Instead, just defer the software clean until the unfreeze step.
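 *
 * The overall sequence is: sdma_freeze_notify() arms sdma_unfreeze_count and
 * posts e80/e85 to pull every engine out of the running state;
 * sdma_freeze() waits for that, re-arms the count, posts e81 and waits for
 * each engine's software clean to finish; sdma_unfreeze() then posts e82 so
 * each engine restarts its hardware and returns to idle or running.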
3385 */ 3386 void sdma_unfreeze(struct hfi1_devdata *dd) 3387 { 3388 int i; 3389 3390 /* tell all engines to start freeze clean up */ 3391 for (i = 0; i < dd->num_sdma; i++) 3392 sdma_process_event(&dd->per_sdma[i], 3393 sdma_event_e82_hw_unfreeze); 3394 } 3395 3396 /** 3397 * _sdma_engine_progress_schedule() - schedule progress on engine 3398 * @sde: sdma_engine to schedule progress 3399 * 3400 */ 3401 void _sdma_engine_progress_schedule( 3402 struct sdma_engine *sde) 3403 { 3404 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); 3405 /* assume we have selected a good cpu */ 3406 write_csr(sde->dd, 3407 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), 3408 sde->progress_mask); 3409 } 3410
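
/*
 * Note on the CSR write above: setting the engine's progress_mask bits in the
 * CCE interrupt-force register bank for the SDMA interrupt sources re-raises
 * the progress interrupt, so sdma_engine_interrupt() (and from it
 * sdma_make_progress()) runs in interrupt context on the CPU servicing this
 * engine's IRQ instead of doing the progress work in the caller's context.
 */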