/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
				u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       unsigned long arg);
static vm_fault_t vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
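/*
 * Token layout, as follows from the masks and shifts above (an
 * illustrative summary, not new ABI): bits 0-11 hold the in-page
 * offset, bits 12-15 the sub-context, bits 16-23 the context,
 * bits 24-27 the mmap type, and bits 32-63 the magic value.  For
 * example, a UREGS token for context 3, sub-context 0, offset 0
 * works out to:
 *
 *	(0xdabbad00ULL << 32) | (UREGS << 24) | (3 << 16)
 */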
#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd || init_srcu_struct(&fd->pq_srcu))
		goto nomem;
	spin_lock_init(&fd->pq_rcu_lock);
	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);
	fd->rec_cpu_num = -1; /* no cpu affinity by default */
	fd->mm = current->mm;
	mmgrab(fd->mm);
	fd->dd = dd;
	kobject_get(&fd->dd->kobj);
	fp->private_data = fd;
	return 0;
nomem:
	kfree(fd);
	fp->private_data = NULL;
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
	return -ENOMEM;
}

static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;
	int uval = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_FREE:
		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, arg);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, arg);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = set_ctxt_pkey(uctxt, arg);
		break;

	case HFI1_IOCTL_CTXT_RESET:
		ret = ctxt_reset(uctxt);
		break;

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
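/*
 * Illustrative user-space call sequence (a sketch, not part of the
 * driver; the device node name is an assumption based on DRIVER_NAME):
 *
 *	int fd = open("/dev/hfi1_0", O_RDWR);
 *	ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo);	// bind a context
 *	ioctl(fd, HFI1_IOCTL_CTXT_INFO, &cinfo);	// query its layout
 *
 * Every other command above requires that ASSIGN_CTXT has already
 * populated fd->uctxt.
 */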
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;
	int idx;

	idx = srcu_read_lock(&fd->pq_srcu);
	pq = srcu_dereference(fd->pq, &fd->pq_srcu);
	if (!cq || !pq) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -EIO;
	}

	if (!iter_is_iovec(from) || !dim) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -EINVAL;
	}

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -ENOSPC;
	}

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	srcu_read_unlock(&fd->pq_srcu, idx);
	return reqs;
}

static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			  (type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
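	/*
	 * Worked example for the PIO address math above (illustrative
	 * values): with hw_context == 2, this context's buffers start at
	 * physaddr + TXE_PIO_SEND + 2 * 64K, and the SOP variant of the
	 * same mapping sits TXE_PIO_SIZE / 2 further into the BAR.
	 */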
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on
		 * the second or third page allocated for credit returns
		 * (if the number of enabled contexts is greater than 64
		 * or 128, respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip.  Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = rcvhdrq_size(uctxt);
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
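	/*
	 * Example of the loop above (illustrative numbers): with four
	 * eager buffers of 64 KB each, egrbufs.size is 256 KB, the VMA
	 * must be exactly that large, and each buffer is remapped at a
	 * user address 64 KB past the previous one.
	 */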
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are.  User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
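/*
 * Summary of the three mapping strategies used above: vmf == 1 defers
 * the mapping to vma_fault() on first access (vmalloc'ed memory),
 * mapio == 1 maps device MMIO with io_remap_pfn_range(), and the
 * remaining cases map ordinary kernel memory with remap_pfn_range().
 */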
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static vm_fault_t vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	__poll_t pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = EPOLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = EPOLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS |
		     HFI1_RCVCTRL_URGENT_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		sc_disable(uctxt->sc);
		set_pio_integrity(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	cleanup_srcu_struct(&fdata->pq_srcu);
	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
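/*
 * Note on kvirt_to_phys(): the result is page aligned
 * (pfn << PAGE_SHIFT); any offset within the page is deliberately
 * dropped, which is fine for the STATUS mmap case above since only
 * whole pages are mapped.
 */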
/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 *
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (ret)
			deallocate_ctxt(uctxt);
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}
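/*
 * Context-assignment flow, in summary: find_sub_ctxt() returns 1 and
 * sets fd->uctxt when an existing shared context matches the request;
 * otherwise allocate_ctxt() creates a fresh base context.  The switch
 * above then finishes with setup_base_ctxt() or complete_subctxt()
 * respectively.
 */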
/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* a non-zero value means a match or an error */
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is distinct from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If the user did not request a specific NUMA node, prefer the
	 * device's NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Setup sub context information if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 * NOTE: _set_bit() can be used here because the context creation is
	 * protected by the mutex (rather than the spin_lock), and will be the
	 * very first instance of this context.
	 */
	__set_bit(0, uctxt->in_use_ctxts);
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}
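/*
 * The three vmalloc_user() regions allocated above back the
 * SUBCTXT_UREGS, SUBCTXT_RCV_HDRQ and SUBCTXT_EGRBUF mmap types: a
 * software copy of the user registers plus per-subcontext copies of
 * the receive header queue and eager buffers, each sized for
 * subctxt_cnt sharers.
 */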
static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when it changes (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (hfi1_rcvhdrtail_kvaddr(uctxt))
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packets per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt.  Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	if (sizeof(cinfo) != len)
		return -EINVAL;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->use_mn)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
	cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
	if (copy_to_user((void __user *)arg, &cinfo, len))
		return -EFAULT;

	return 0;
}
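/*
 * The runtime_flags value assembled above merges three views of the
 * capability mask: the "misc" capability bits relocated into the
 * user-visible range, the user-settable capability bits, and the
 * kernel capabilities that are exported to user space (K2U).
 */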
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt)
{
	int ret;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
	if (ret)
		return ret;

	ret = hfi1_user_exp_rcv_init(fd, uctxt);
	if (ret)
		hfi1_user_sdma_free_queues(fd, uctxt);

	return ret;
}

static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
		goto done;

	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto done;

	ret = init_user_ctxt(fd, uctxt);
	if (ret)
		goto done;

	user_init(uctxt);

	/* Now that the context is set up, the fd can get a reference. */
	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

done:
	if (uctxt->subctxt_cnt) {
		/*
		 * On error, set the failed bit so sub-contexts will clean up
		 * correctly.
		 */
		if (ret)
			set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);

		/*
		 * Base context is done (successfully or not), notify anybody
		 * using a sub-context that is waiting for this completion.
		 */
		clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return ret;
}

static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned offset;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

	if (sizeof(binfo) != len)
		return -EINVAL;

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages.  Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].dma);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}

	if (copy_to_user((void __user *)arg, &binfo, len))
		return -EFAULT;

	return 0;
}
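/*
 * Note on the tokens returned above: user space passes each one back
 * verbatim as the mmap() offset, e.g. (a sketch, assuming binfo was
 * already fetched via HFI1_IOCTL_USER_INFO):
 *
 *	mmap(NULL, len, PROT_READ, MAP_SHARED, fd, binfo.rcvhdr_bufbase);
 *
 * hfi1_file_mmap() then validates the magic, context and sub-context
 * fields before creating the mapping.
 */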
/**
 * user_exp_rcv_setup - Set up the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_setup.
 *
 */
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
	if (!ret) {
		/*
		 * Copy the number of tidlist entries we used
		 * and the length of the buffer we registered.
		 */
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;

		addr = arg + offsetof(struct hfi1_tid_info, length);
		if (copy_to_user((void __user *)addr, &tinfo.length,
				 sizeof(tinfo.length)))
			ret = -EFAULT;
	}

	return ret;
}

/**
 * user_exp_rcv_clear - Clear the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * The hfi1_user_exp_rcv_clear() can be called from the error path.  Because
 * of this, we need to use this wrapper to copy the user space information
 * before doing the clear.
 */
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
	if (!ret) {
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;
	}

	return ret;
}
/**
 * user_exp_rcv_invalid - Invalidate the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_invalid.
 *
 */
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
				u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (!fd->invalid_tids)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
	if (ret)
		return ret;

	addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
	if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
			 sizeof(tinfo.tidcnt)))
		ret = -EFAULT;

	return ret;
}

static __poll_t poll_urgent(struct file *fp,
			    struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	__poll_t pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = EPOLLIN | EPOLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t poll_next(struct file *fp,
			  struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	__poll_t pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
		pollflag = 0;
	} else {
		pollflag = EPOLLIN | EPOLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
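/*
 * Poll semantics, in brief: poll_urgent() reports readiness when new
 * urgent packets have arrived since the last poll, otherwise it arms
 * HFI1_CTXT_WAITING_URG; poll_next() reports readiness when the header
 * queue is non-empty, otherwise it arms HFI1_CTXT_WAITING_RCV and
 * enables the "interrupt available" rcvctrl bit so the next packet
 * wakes the waiter.
 */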
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	u16 ctxt;

	if (!dd->events)
		return -EINVAL;

	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
		if (uctxt) {
			unsigned long *evs;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			evs = dd->events + uctxt_offset(uctxt);
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
			hfi1_rcd_put(uctxt);
		}
	}

	return 0;
}

/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       unsigned long arg)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;
	int start_stop;

	if (subctxt)
		return 0;

	if (get_user(start_stop, (int __user *)arg))
		return -EFAULT;

	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (hfi1_rcvhdrtail_kvaddr(uctxt))
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
	/* always; new head should be equal to new tail; see above */

	return 0;
}

/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long arg)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;
	unsigned long events;

	if (!dd->events)
		return 0;

	if (get_user(events, (unsigned long __user *)arg))
		return -EFAULT;

	evs = dd->events + uctxt_offset(uctxt) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}

static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
{
	int i;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 pkey;

	if (!HFI1_CAP_IS_USET(PKEY_CHECK))
		return -EPERM;

	if (get_user(pkey, (u16 __user *)arg))
		return -EFAULT;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i])
			return hfi1_set_ctxt_pkey(dd, uctxt, pkey);

	return -ENOENT;
}
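/*
 * Event-bit life cycle, in summary: hfi1_set_uevent_bits() sets a bit
 * for the base context and every sub-context sharing it; user space
 * observes the bit through its mapped EVENTS page and acknowledges it
 * with HFI1_IOCTL_ACK_EVENT, which lands in user_event_ack() above and
 * clears only the caller's per-subcontext word.
 */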
/**
 * ctxt_reset - Reset the user context
 * @uctxt: valid user context
 */
static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
{
	struct send_context *sc;
	struct hfi1_devdata *dd;
	int ret = 0;

	if (!uctxt || !uctxt->dd || !uctxt->sc)
		return -EINVAL;

	/*
	 * There is no protection here.  User level has to guarantee that
	 * no one will be writing to the send context while it is being
	 * re-initialized.  If user level breaks that guarantee, it will
	 * break its own context and no one else's.
	 */
	dd = uctxt->dd;
	sc = uctxt->sc;

	/*
	 * Wait until the interrupt handler has marked the context as
	 * halted or frozen.  Report error if we time out.
	 */
	wait_event_interruptible_timeout(
		sc->halt_wait, (sc->flags & SCF_HALTED),
		msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
	if (!(sc->flags & SCF_HALTED))
		return -ENOLCK;

	/*
	 * If the send context was halted due to a Freeze, wait until the
	 * device has been "unfrozen" before resetting the context.
	 */
	if (sc->flags & SCF_FROZEN) {
		wait_event_interruptible_timeout(
			dd->event_queue,
			!(READ_ONCE(dd->flags) & HFI1_FROZEN),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (dd->flags & HFI1_FROZEN)
			return -ENOLCK;

		if (dd->flags & HFI1_FORCED_FREEZE)
			/*
			 * Don't allow context reset if we are in the
			 * middle of a forced freeze.
			 */
			return -ENODEV;

		sc_disable(sc);
		ret = sc_enable(sc);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
	} else {
		ret = sc_restart(sc);
	}
	if (!ret)
		sc_return_credits(sc);

	return ret;
}

static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->kobj);
	if (ret)
		user_remove(dd);

	return ret;
}

/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}