/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

static unsigned initial_pkt_count = 8;
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec);
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

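/*
 * defer_packet_queue() and activate_packet_queue() are the iowait
 * callbacks for a user packet queue: when the SDMA engine has no room,
 * the queue is marked deferred and parked on the engine's dmawait list;
 * once the engine drains, the queue is marked active again and any
 * thread sleeping in hfi1_user_sdma_process_request() is woken.
 */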
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq))
		goto eagain;
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list)) {
		iowait_get_priority(&pq->busy);
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
eagain:
	write_sequnlock(&sde->waitlock);
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

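/*
 * hfi1_user_sdma_alloc_queues() - set up per-context user SDMA state.
 *
 * Allocates the packet queue (pq), the request array and in-use bitmap,
 * a per-context txreq slab cache, and the user-mappable completion queue
 * (cq), then registers the queue with the MMU rb-tree handler so cached
 * page pins can be invalidated.  On success the queue is published
 * through fd->pq and fd->cq.
 */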
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{
	int ret = -ENOMEM;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;

	if (!uctxt || !fd)
		return -EBADF;

	if (!hfi1_sdma_comp_ring_size)
		return -EINVAL;

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);
	pq->mm = fd->mm;

	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
		    activate_packet_queue, NULL, NULL);
	pq->reqidx = 0;

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
			   sizeof(*pq->reqs),
			   GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
				 sizeof(*pq->req_in_use),
				 GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    NULL);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
				 * hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto pq_mmu_fail;
	}

	rcu_assign_pointer(fd->pq, pq);
	fd->cq = cq;

	return 0;

pq_mmu_fail:
	vfree(cq->comps);
cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);

	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
			       struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_user_sdma_pkt_q *pq;

	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);

	spin_lock(&fd->pq_rcu_lock);
	pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
				    lockdep_is_held(&fd->pq_rcu_lock));
	if (pq) {
		rcu_assign_pointer(fd->pq, NULL);
		spin_unlock(&fd->pq_rcu_lock);
		synchronize_srcu(&fd->pq_srcu);
		/* at this point there can be no more new requests */
		if (pq->handler)
			hfi1_mmu_rb_unregister(pq->handler);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			!atomic_read(&pq->n_reqs));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
	} else {
		spin_unlock(&fd->pq_rcu_lock);
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

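/*
 * Map a DLID to a small selector value.  An 8-bit hash of the DLID
 * indexes a lazily populated table; the first time a hash bucket is
 * seen it is assigned the next selector in round-robin order.  The
 * selector later feeds sdma_select_user_engine(), so traffic to the
 * same destination tends to land on the same SDMA engine.
 */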
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

/**
 * hfi1_user_sdma_process_request() - Process and start a user sdma request
 * @fd: valid file descriptor
 * @iovec: array of io vectors to process
 * @dim: overall iovec array size
 * @count: number of io vector array entries processed
 */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq =
		srcu_dereference(fd->pq, &fd->pq_srcu);
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	u16 pkey;
	u32 slid;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
			SDMA,
			"[%u:%u:%u] First vector not big enough for header %lu/%lu",
			dd->unit, uctxt->ctxt, fd->subctxt,
			iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count.  Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
					     info.comp_idx);
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->data_len = 0;
	req->pq = pq;
	req->cq = cq;
	req->ahg_idx = -1;
	req->iov_idx = 0;
	req->sent = 0;
	req->seqnum = 0;
	req->seqcomp = 0;
	req->seqsubmitted = 0;
	req->tids = NULL;
	req->has_error = 0;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	/* The request is initialized, count it */
	atomic_inc(&pq->n_reqs);

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
	slid = be16_to_cpu(req->hdr.lrh[3]);
	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
			 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			  KDETH_OM_LARGE : KDETH_OM_SMALL);
	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
					       info.comp_idx, req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		req->iovs[i].offset = 0;
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov,
		       iovec + idx++,
		       sizeof(req->iovs[i].iov));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->data_iovs = i;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
					 info.comp_idx, req->data_len);
	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * Copy any TID info
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * setup. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
		u32 *tmp;

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		tmp = memdup_user(iovec[idx].iov_base,
				  ntids * sizeof(*req->tids));
		if (IS_ERR(tmp)) {
			ret = PTR_ERR(tmp);
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			goto free_req;
		}
		req->tids = tmp;
		req->n_tids = ntids;
		req->tididx = 0;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
		req->ahg_idx = sdma_ahg_alloc(req->sde);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	pq->state = SDMA_PKT_Q_ACTIVE;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY))
		goto free_req;

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY)
				goto free_req;
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	/*
	 * If seqsubmitted == npkts, the completion routine controls the
	 * final state.  If seqsubmitted < npkts, wait for any outstanding
	 * packets to finish before cleaning up.
	 */
	if (req->seqsubmitted < req->info.npkts) {
		if (req->seqsubmitted)
			wait_event(pq->busy.wait_dma,
				   (req->seqcomp == req->seqsubmitted - 1));
		user_sdma_free_request(req, true);
		pq_update(pq);
		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
	}
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes,
	 * therefore, when the data length request is less than 4 bytes, there's
	 * only one packet, and the packet data length is equal to that of the
	 * request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	trace_hfi1_sdma_user_compute_length(req->pq->dd,
					    req->pq->ctxt,
					    req->pq->subctxt,
					    req->info.comp_idx,
					    len);
	return len;
}

static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}

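/*
 * First packet of an AHG-capable request: copy the header template into
 * the txreq (the hardware needs a cacheline-aligned address), fix up the
 * PBC length if it disagrees with the LRH, validate the template, and
 * initialize the txreq with SDMA_TXREQ_F_AHG_COPY so later packets only
 * need the incremental header updates built by set_txreq_header_ahg().
 */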
static int user_sdma_txadd_ahg(struct user_sdma_request *req,
			       struct user_sdma_txreq *tx,
			       u32 datalen)
{
	int ret;
	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	/*
	 * Copy the request header into the tx header
	 * because the HW needs a cacheline-aligned
	 * address.
	 * This copy can be optimized out if the hdr
	 * member of user_sdma_request were also
	 * cacheline aligned.
	 */
	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		tx->hdr.pbc[0] = cpu_to_le16(pbclen);
	}
	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
	if (ret)
		return ret;
	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
			      sizeof(tx->hdr) + datalen, req->ahg_idx,
			      0, NULL, 0, user_sdma_txreq_cb);
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
	if (ret)
		sdma_txclean(pq->dd, &tx->txreq);
	return ret;
}

static int user_sdma_txadd(struct user_sdma_request *req,
			   struct user_sdma_txreq *tx,
			   struct user_sdma_iovec *iovec, u32 datalen,
			   u32 *queued_ptr, u32 *data_sent_ptr,
			   u64 *iov_offset_ptr)
{
	int ret;
	unsigned int pageidx, len;
	unsigned long base, offset;
	u64 iov_offset = *iov_offset_ptr;
	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	base = (unsigned long)iovec->iov.iov_base;
	offset = offset_in_page(base + iovec->offset + iov_offset);
	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
		   PAGE_SHIFT);
	len = offset + req->info.fragsize > PAGE_SIZE ?
		PAGE_SIZE - offset : req->info.fragsize;
	len = min((datalen - queued), len);
	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
			      offset, len);
	if (ret) {
		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
		return ret;
	}
	iov_offset += len;
	queued += len;
	data_sent += len;
	if (unlikely(queued < datalen && pageidx == iovec->npages &&
		     req->iov_idx < req->data_iovs - 1)) {
		iovec->offset += iov_offset;
		iovec = &req->iovs[++req->iov_idx];
		iov_offset = 0;
	}

	*queued_ptr = queued;
	*data_sent_ptr = data_sent;
	*iov_offset_ptr = iov_offset;
	return ret;
}

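/*
 * Build and submit up to @maxpkts packets for @req.  Each iteration
 * allocates a txreq, computes the payload length, attaches the header
 * (directly or through AHG updates), and adds page-sized chunks of the
 * user buffers.  The assembled list is handed to sdma_send_txlist();
 * -EBUSY from the engine means the queue was deferred and the caller
 * retries once the packet queue becomes active again.
 */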
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
	int ret = 0;
	u16 count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (READ_ONCE(req->has_error))
		return -EFAULT;

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (READ_ONCE(req->has_error))
			return -EFAULT;

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		INIT_LIST_HEAD(&tx->list);

		/*
		 * For the last packet set the ACK request
		 * and disable header suppression.
		 */
		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
				      TXREQ_FLAGS_REQ_DISABLE_SH);

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_tx;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);

			/*
			 * Disable header suppression for payloads <= 8 DWs.
			 * If there is an uncorrectable error in the receive
			 * data FIFO when the received payload size is less
			 * than or equal to 8 DWs, RxDmaDataFifoRdUncErr is
			 * not reported; RHF.EccErr is set instead, provided
			 * the header is not suppressed.
			 */
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			} else if (datalen <= 32) {
				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
			}
		}

		if (req->ahg_idx >= 0) {
			if (!req->seqnum) {
				ret = user_sdma_txadd_ahg(req, tx, datalen);
				if (ret)
					goto free_tx;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0) {
					ret = changes;
					goto free_tx;
				}
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			ret = user_sdma_txadd(req, tx, iovec, datalen,
					      &queued, &data_sent, &iov_offset);
			if (ret)
				goto free_txreq;
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde,
			       iowait_get_ib_work(&pq->busy),
			       &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (req->ahg_idx >= 0)
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

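/*
 * Pinned-page handling.  User buffers are pinned with
 * hfi1_acquire_user_pages() and the pins are cached in the MMU rb-tree
 * keyed by virtual address, so repeated requests over the same buffer
 * avoid re-pinning.  When the number of locked pages would exceed the
 * allowed limit, sdma_cache_evict() asks the rb-tree handler to shed
 * unreferenced nodes until enough pages have been released.
 */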
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}

static int pin_sdma_pages(struct user_sdma_request *req,
			  struct user_sdma_iovec *iovec,
			  struct sdma_mmu_node *node,
			  int npages)
{
	int pinned, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	memcpy(pages, node->pages, node->npages * sizeof(*pages));

	npages -= node->npages;
retry:
	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
				atomic_read(&pq->n_locked), npages)) {
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;
	}
	pinned = hfi1_acquire_user_pages(pq->mm,
					 ((unsigned long)iovec->iov.iov_base +
					 (node->npages * PAGE_SIZE)), npages, 0,
					 pages + node->npages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned != npages) {
		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
		return -EFAULT;
	}
	kfree(node->pages);
	node->rb.len = iovec->iov.iov_len;
	node->pages = pages;
	atomic_add(pinned, &pq->n_locked);
	return pinned;
}

static void unpin_sdma_pages(struct sdma_mmu_node *node)
{
	if (node->npages) {
		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
		atomic_sub(node->npages, &node->pq->n_locked);
	}
}

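/*
 * Resolve the pages backing one data iovec.  If a cached node covering
 * the iovec's base address already holds enough pages, reuse it and
 * take a reference; otherwise pin the missing tail of the buffer and
 * (re)insert the node into the rb-tree.
 */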
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;
	struct iovec *iov;
	bool extracted;

	extracted =
		hfi1_mmu_rb_remove_unless_exact(pq->handler,
						(unsigned long)
						iovec->iov.iov_base,
						iovec->iov.iov_len, &rb_node);
	if (rb_node) {
		node = container_of(rb_node, struct sdma_mmu_node, rb);
		if (!extracted) {
			atomic_inc(&node->refcount);
			iovec->pages = node->pages;
			iovec->npages = node->npages;
			iovec->node = node;
			return 0;
		}
	}

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
	}

	iov = &iovec->iov;
	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
	if (node->npages < npages) {
		pinned = pin_sdma_pages(req, iovec, node, npages);
		if (pinned < 0) {
			ret = pinned;
			goto bail;
		}
		node->npages += pinned;
		npages = node->npages;
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
	if (ret) {
		iovec->node = NULL;
		goto bail;
	}
	return 0;
bail:
	unpin_sdma_pages(node);
	kfree(node);
	return ret;
}

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 * - transfer size is multiple of 64 bytes
	 * - packet length is multiple of 4 bytes
	 * - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			  KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 * - offset is not larger than the TID size
		 * - TIDCtrl values match between header and TID array
		 * - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}

/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
			((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}

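/*
 * Fill in the per-packet header for the non-AHG path: recompute the
 * PBC/LRH lengths if needed, advance BTH.PSN, update KDETH.Offset and,
 * for expected (TID) traffic, the KDETH TID fields, then attach the
 * header to the txreq.
 */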
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u8 omfactor; /* KDETH.OM */
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * Third packet
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs at once, we
			 * have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
			KDETH_OM_SMALL_SHIFT;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH when DISABLE_SH flag is set */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		trace_hfi1_sdma_user_tid_info(
			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
			req->tidoffset, req->tidoffset >> omfactor,
			omfactor != KDETH_OM_SMALL_SHIFT);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset >> omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  omfactor != KDETH_OM_SMALL_SHIFT);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}

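/*
 * AHG counterpart of set_txreq_header(): instead of rewriting the whole
 * header, build a list of AHG field updates (PBC/LRH lengths, BTH.PSN
 * and ACK bit, KDETH.Offset, and the TID fields for expected traffic)
 * and hand them to the hardware via sdma_txinit_ahg().  Returns the
 * number of update descriptors used, or a negative errno.
 */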
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
{
	u32 ahg[AHG_KDETH_ARRAY_SIZE];
	int idx = 0;
	u8 omfactor; /* KDETH.OM */
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
	size_t array_size = ARRAY_SIZE(ahg);

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
		if (idx < 0)
			return idx;
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
				     (__force u16)cpu_to_be16(lrhlen >> 2));
		if (idx < 0)
			return idx;
	}

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		val32 |= 1UL << 31;
	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
			     (__force u16)cpu_to_be16(val32 >> 16));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
			     (__force u16)cpu_to_be16(val32 & 0xffff));
	if (idx < 0)
		return idx;
	/* KDETH.Offset */
	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
			     (__force u16)cpu_to_le16(req->koffset >> 16));
	if (idx < 0)
		return idx;
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs at once, we
			 * have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx])
				return -EINVAL;
			tidval = req->tids[req->tididx];
		}
		omfactor = ((EXP_TID_GET(tidval, LEN) *
			     PAGE_SIZE) >=
			     KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
			     KDETH_OM_SMALL_SHIFT;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		idx = ahg_header_set(
				ahg, idx, array_size, 7, 0, 16,
				((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
				((req->tidoffset >> omfactor)
				 & 0x7fff)));
		if (idx < 0)
			return idx;
		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));

		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		} else {
			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		}

		idx = ahg_header_set(ahg, idx, array_size,
				     7, 16, 14, (__force u16)val);
		if (idx < 0)
			return idx;
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, ahg, idx, tidval);
	sdma_txinit_ahg(&tx->txreq,
			SDMA_TXREQ_F_USE_AHG,
			datalen, req->ahg_idx, idx,
			ahg, sizeof(req->hdr),
			user_sdma_txreq_cb);

	return idx;
}

/**
 * user_sdma_txreq_cb() - SDMA tx request completion callback.
 * @txreq: valid sdma tx request
 * @status: success/failure of request
 *
 * Called when the SDMA progress state machine gets notification that
 * the SDMA descriptors for this tx request have been processed by the
 * DMA engine. Called in interrupt context.
 * Only do work on completed sequences.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	enum hfi1_sdma_comp_state state = COMPLETE;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		WRITE_ONCE(req->has_error, 1);
		state = ERROR;
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	/* sequence isn't complete?  We are done */
	if (req->seqcomp != req->info.npkts - 1)
		return;

	user_sdma_free_request(req, false);
	set_comp_state(pq, cq, req->info.comp_idx, state, status);
	pq_update(pq);
}

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs))
		wake_up(&pq->wait);
}

static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
	int i;

	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}

	for (i = 0; i < req->data_iovs; i++) {
		struct sdma_mmu_node *node = req->iovs[i].node;

		if (!node)
			continue;

		req->iovs[i].node = NULL;

		if (unpin)
			hfi1_mmu_rb_remove(req->pq->handler,
					   &node->rb);
		else
			atomic_dec(&node->refcount);
	}

	kfree(req->tids);
	clear_bit(req->info.comp_idx, req->pq->req_in_use);
}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	smp_wmb(); /* make sure errcode is visible first */
	cq->comps[idx].status = state;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}

static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	unpin_sdma_pages(node);
	kfree(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}