/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pgtable.h>

#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

/*
 * This is really, really weird shit - write() and writev() here
 * have completely unrelated semantics.  Sucky userland ABI,
 * film at 11.
 */
static const struct file_operations qib_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_write,
	.write_iter = qib_write_iter,
	.open = qib_open,
	.release = qib_close,
	.poll = qib_poll,
	.mmap = qib_mmapf,
	.llseek = noop_llseek,
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
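
/**
 * qib_get_base_info - fill a qib_base_info and copy it to user space
 * @fp: the qib device file
 * @ubase: destination buffer in user space
 * @ubase_size: size of the user's buffer
 *
 * Reports queue sizes, the PIO buffer range assigned to this
 * context/subcontext, and the physical (or kernel-virtual, for the
 * shared-context structures) addresses the process must use as mmap
 * offsets.  Callers that don't request context sharing may pass the
 * older, shorter structure.
 */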
static int qib_get_base_info(struct file *fp, void __user *ubase,
			     size_t ubase_size)
{
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	int ret = 0;
	struct qib_base_info *kinfo = NULL;
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	unsigned subctxt_cnt;
	int shared, master;
	size_t sz;

	subctxt_cnt = rcd->subctxt_cnt;
	if (!subctxt_cnt) {
		shared = 0;
		master = 0;
		subctxt_cnt = 1;
	} else {
		shared = 1;
		master = !subctxt_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If context sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->f_get_base_info(rcd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		rcd->rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
	/*
	 * for this use, may be cfgctxts summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nctxts = dd->cfgctxts;
	/* unit (chip/board) our context is on */
	kinfo->spi_unit = dd->unit;
	kinfo->spi_port = ppd->port;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per context, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
	 * since the chips with non-zero rhf_offset don't normally
	 * enable tail register updates to host memory, but for testing,
	 * both can be enabled and used.
	 */
	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
	kinfo->spi_rhf_offset = dd->rhf_offset;
	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
	/* setup per-unit (not port) status area for user programs */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(char *) ppd->statusp -
		(char *) dd->pioavailregs_dma;
	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!shared) {
		kinfo->spi_piocnt = rcd->piocnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs;
		kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
	} else if (master) {
		kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
				    (rcd->piocnt % subctxt_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign *
			(rcd->piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign * kinfo->spi_piocnt * slave;
	}

	if (shared) {
		kinfo->spi_sendbuf_status =
			cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
		/* only spi_subctxt_* fields should be set in this block! */
		kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);

		kinfo->spi_subctxt_rcvegrbuf =
			cvt_kvaddr(rcd->subctxt_rcvegrbuf);
		kinfo->spi_subctxt_rcvhdr_base =
			cvt_kvaddr(rcd->subctxt_rcvhdr_base);
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
		dd->palign;
	kinfo->spi_pioalign = dd->palign;
	kinfo->spi_qpair = QIB_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
	kinfo->spi_ctxt = rcd->ctxt;
	kinfo->spi_subctxt = subctxt_fp(fp);
	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
	kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
	kinfo->spi_hw_version = dd->revision;

	if (master)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;
bail:
	kfree(kinfo);
	return ret;
}
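
/*
 * For orientation, a minimal sketch of the assumed userland consumer
 * (not part of this driver): each spi_* address filled in above is
 * used verbatim as an mmap() offset on the device fd, and qib_mmapf()
 * below matches it against the corresponding buffer.  Mapping the
 * receive header queue might look like:
 *
 *	struct qib_base_info binfo;	// assumed filled via QIB_CMD_USER_INIT
 *	size_t len = binfo.spi_rcvhdr_cnt * binfo.spi_rcvhdrent_size *
 *		     sizeof(__u32);
 *	void *hdrq = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, binfo.spi_rcvhdr_base);
 */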

/**
 * qib_tid_update - update a context TID
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To reduce search time, we
 * keep a cursor for each context, walking the shadow tid array to find
 * one that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
			  const struct qib_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct qib_devdata *dd = rcd->dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subctxt = subctxt_fp(fp);

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ret = -EFAULT;
		goto done;
	}
	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt) {
		tidcnt = dd->rcvtidcnt;
		tid = rcd->tidcursor;
		tidoff = 0;
	} else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		tidoff = dd->rcvtidcnt - tidcnt;
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		tidoff = tidcnt * (subctxt - 1);
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in tid_pg_list */
		qib_devinfo(dd->pcidev,
			"Process tried to allocate %u TIDs, only trying max (%u)\n",
			cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **) rcd->tid_pg_list;
	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
	pagep += tidoff;
	tidlist += tidoff;

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok((void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ret = -EFAULT;
		goto done;
	}
	ret = qib_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		/*
		 * if (ret == -EBUSY)
		 * We can't continue because the pagep array won't be
		 * initialized.  This should never happen,
		 * unless perhaps the user has mpin'ed the pages
		 * themselves.
		 */
		qib_devinfo(
			dd->pcidev,
			"Failed to lock addr %p, %u pages: errno %d\n",
			(void *) vaddr, cnt, -ret);
		goto done;
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		dma_addr_t daddr;

		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->pageshadow[ctxttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * Oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
		if (ret)
			break;

		tidlist[i] = tid + tidoff;
		/* we "know" system pages and TID pages are same size */
		dd->pageshadow[ctxttid + tid] = pagep[i];
		dd->physshadow[ctxttid + tid] = daddr;
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->physshadow[ctxttid + tid];
		/* PERFORMANCE: below should almost certainly be cached */
		dd->f_put_tid(dd, &tidbase[tid],
			      RCVHQ_RCV_TYPE_EXPECTED, physaddr);
		/*
		 * don't check this tid in qib_ctxtshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
cleanup:
		/* jump here if copy out of updated info failed... */
		/* same code that's in qib_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->pageshadow[ctxttid + tid]) {
				dma_addr_t phys;

				phys = dd->physshadow[ctxttid + tid];
				dd->physshadow[ctxttid + tid] = dd->tidinvalid;
				/* PERFORMANCE: below should almost certainly
				 * be cached
				 */
				dd->f_put_tid(dd, &tidbase[tid],
					      RCVHQ_RCV_TYPE_EXPECTED,
					      dd->tidinvalid);
				dma_unmap_page(&dd->pcidev->dev, phys,
					       PAGE_SIZE, DMA_FROM_DEVICE);
				dd->pageshadow[ctxttid + tid] = NULL;
			}
		}
		qib_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with qib_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user(u64_to_user_ptr(ti->tidmap),
				 tidmap, sizeof(tidmap))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!rcd->subctxt_cnt)
			rcd->tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	return ret;
}
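
/*
 * A rough sketch of the assumed userland side (not part of this
 * driver): expected-TID pages are pinned by writing a
 * QIB_CMD_TID_UPDATE command to the device fd, with qib_tid_info
 * describing a virtually contiguous buffer:
 *
 *	struct qib_cmd c = { .type = QIB_CMD_TID_UPDATE };
 *	c.cmd.tid_info.tidcnt = npages;
 *	c.cmd.tid_info.tidvaddr = (__u64) (uintptr_t) buf;
 *	c.cmd.tid_info.tidlist = (__u64) (uintptr_t) tids;	// out
 *	c.cmd.tid_info.tidmap = (__u64) (uintptr_t) bitmap;	// out
 *	write(fd, &c, sizeof(c));
 *
 * QIB_CMD_TID_FREE with the returned tidmap undoes this via
 * qib_tid_free() below.
 */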

/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this context
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
			const struct qib_tid_info *ti)
{
	int ret = 0;
	u32 tid, ctxttid, cnt, limit, tidcnt;
	struct qib_devdata *dd = rcd->dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
			   sizeof(tidmap))) {
		ret = -EFAULT;
		goto done;
	}

	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt)
		tidcnt = dd->rcvtidcnt;
	else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		ctxttid += dd->rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		ctxttid += tidcnt * (subctxt - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->pageshadow[ctxttid + tid]) {
			struct page *p;
			dma_addr_t phys;

			p = dd->pageshadow[ctxttid + tid];
			dd->pageshadow[ctxttid + tid] = NULL;
			phys = dd->physshadow[ctxttid + tid];
			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
			/* PERFORMANCE: below should almost certainly be
			 * cached
			 */
			dd->f_put_tid(dd, &tidbase[tid],
				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
			dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			qib_release_user_pages(&p, 1);
		}
	}
done:
	return ret;
}

/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple contexts may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single qlogic_ib register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time; we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
	struct qib_pportdata *ppd = rcd->ppd;
	int i, pidx = -1;
	bool any = false;
	u16 lkey = key & 0x7FFF;

	if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
		/* nothing to do; this key always valid */
		return 0;

	if (!lkey)
		return -EINVAL;

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it.
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i] && pidx == -1)
			pidx = i;
		if (rcd->pkeys[i] == key)
			return -EEXIST;
	}
	if (pidx == -1)
		return -EBUSY;
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any = true;
			continue;
		}
		if (ppd->pkeys[i] == key) {
			atomic_t *pkrefs = &ppd->pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				rcd->pkeys[pidx] = key;
				return 0;
			}
			/*
			 * lost race, decrement count, catch below
			 */
			atomic_dec(pkrefs);
			any = true;
		}
		if ((ppd->pkeys[i] & 0x7FFF) == lkey)
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			return -EEXIST;
	}
	if (!any)
		return -EBUSY;
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			rcd->pkeys[pidx] = key;
			ppd->pkeys[i] = key;
			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
			return 0;
		}
	}
	return -EBUSY;
}

/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
			   int start_stop)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (rcd->rcvhdrtail_kvaddr)
			qib_clear_rcvhdrtail(rcd);
		rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
	} else
		rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
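
/*
 * Drop this context's references on its partition keys at close,
 * clearing any port pkey slot whose refcount falls to zero, then
 * pushing the updated set to the hardware if anything changed.
 */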
static void qib_clean_part_key(struct qib_ctxtdata *rcd,
			       struct qib_devdata *dd)
{
	int i, j, pchanged = 0;
	struct qib_pportdata *ppd = rcd->ppd;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i])
			continue;
		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
			/* check for match independent of the global bit */
			if ((ppd->pkeys[j] & 0x7fff) !=
			    (rcd->pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
				ppd->pkeys[j] = 0;
				pchanged++;
			}
			break;
		}
		rcd->pkeys[i] = 0;
	}
	if (pchanged)
		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}

/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
			unsigned len, void *kvaddr, u32 write_ok, char *what)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		qib_devinfo(dd->pcidev,
			"FAIL on %s: len %lx > %x\n", what,
			vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * shared context user code requires rcvhdrq mapped r/w, others
	 * only allowed readonly mapping.
	 */
	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				"%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		qib_devinfo(dd->pcidev,
			"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
			what, rcd->ctxt, pfn, len, ret);
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	unsigned long sz;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their ctxt
	 * in the chip.
	 */
	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
	if ((vma->vm_end - vma->vm_start) > sz) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap userreg: reqlen %lx > PAGE\n",
			vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct qib_devdata *dd,
			struct qib_ctxtdata *rcd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible; unfortunately, x86 doesn't allow
	 * for this in hardware, but we still prevent users from asking
	 * for it.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap piobufs: reqlen %lx > PAGE\n",
			vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->physaddr + piobufs;

#if defined(__powerpc__)
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for when
	 * not initially mapped readable, as is normally the case)
	 */
	vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);

	/* We used PAT if wc_cookie == 0 */
	if (!dd->wc_cookie)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = rcd->rcvegrbuf_size;
	total_size = rcd->rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		qib_devinfo(dd->pcidev,
			"FAIL on egr bufs: reqlen %lx > actual %lx\n",
			vma->vm_end - vma->vm_start,
			(unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		qib_devinfo(dd->pcidev,
			"Can't map eager buffers as writable (flags=%lx)\n",
			vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
	/* don't allow them to later change to writable with mprotect */
	vm_flags_clear(vma, VM_MAYWRITE);

	start = vma->vm_start;

	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct qib_file_vm_ops = {
	.fault = qib_file_vma_fault,
};
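
/*
 * Handle mmap of the shared-context structures (subcontext uregbase,
 * rcvhdr queues, eager buffer copies, and the user event mask), which
 * live in vmalloc'ed kernel memory and are faulted in per page through
 * qib_file_vm_ops.  Returns 1 when the offset matched and the mapping
 * was set up, 0 if the offset is not one of these kernel virtual
 * addresses, or a negative errno on failure.
 */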
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct qib_ctxtdata *rcd, unsigned subctxt)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned subctxt_cnt;
	unsigned long len;
	void *addr;
	size_t size;
	int ret = 0;

	subctxt_cnt = rcd->subctxt_cnt;
	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

	/*
	 * Each process has all the subctxt uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
		addr = rcd->subctxt_uregbase;
		size = PAGE_SIZE * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
		addr = rcd->subctxt_rcvhdr_base;
		size = rcd->rcvhdrq_size * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
		addr = rcd->subctxt_rcvegrbuf;
		size *= subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
					PAGE_SIZE * subctxt)) {
		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
					rcd->rcvhdrq_size * subctxt)) {
		addr = rcd->subctxt_rcvhdr_base +
			rcd->rcvhdrq_size * subctxt;
		size = rcd->rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
		addr = rcd->user_event_mask;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
					size * subctxt)) {
		addr = rcd->subctxt_rcvegrbuf + size * subctxt;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				"Can't map eager buffers as writable (flags=%lx)\n",
				vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writable
		 * with mprotect.
		 */
		vm_flags_clear(vma, VM_MAYWRITE);
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &qib_file_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	ret = 1;

bail:
	return ret;
}

/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret, match = 1;

	rcd = ctxt_fp(fp);
	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto bail;
	}
	dd = rcd->dd;

	/*
	 * This is the qib_do_user_init() code, mapping the shared buffers
	 * and per-context user registers into the user process.  The address
	 * referred to by vm_pgoff is the file offset passed via mmap().
	 * For shared contexts, this is the kernel vmalloc() address of the
	 * pages to share with the master.
	 * For non-shared or master ctxts, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!rcd->subctxt_cnt) {
		/* ctxt is not shared */
		piocnt = rcd->piocnt;
		piobufs = rcd->piobufs;
	} else if (!subctxt_fp(fp)) {
		/* caller is the master */
		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
			 (rcd->piocnt % rcd->subctxt_cnt);
		piobufs = rcd->piobufs +
			dd->palign * (rcd->piocnt - piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		/* caller is a slave */
		piocnt = rcd->piocnt / rcd->subctxt_cnt;
		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
	else if (pgaddr == dd->pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   (void *) dd->pioavailregs_dma, 0,
				   "pioavail registers");
	else if (pgaddr == rcd->rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, rcd);
	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; multiple pages, contiguous
		 * from an i/o perspective.  Shared contexts need
		 * to map r/w, so we allow writing.
		 */
		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
				   rcd->rcvhdrq, 1, "rcvhdrq");
	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   rcd->rcvhdrtail_kvaddr, 0,
				   "rcvhdrq tail");
	else
		match = 0;
	if (!match)
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		qib_devinfo(dd->pcidev,
			 "mmap Failure %d: off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}
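
/*
 * Poll helpers: qib_poll_urgent() reports EPOLLIN when the context has
 * seen new urgent packets since the last poll, while qib_poll_next()
 * reports EPOLLIN when the receive header queue is non-empty, and
 * otherwise arms the receive-available interrupt so the waiter can be
 * woken when data arrives.
 */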
static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
				struct file *fp,
				struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	__poll_t pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (rcd->urgent != rcd->urgent_poll) {
		pollflag = EPOLLIN | EPOLLRDNORM;
		rcd->urgent_poll = rcd->urgent;
	} else {
		pollflag = 0;
		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
			      struct file *fp,
			      struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	__poll_t pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (dd->f_hdrqempty(rcd)) {
		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
		pollflag = 0;
	} else
		pollflag = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct qib_ctxtdata *rcd;
	__poll_t pollflag;

	rcd = ctxt_fp(fp);
	if (!rcd)
		pollflag = EPOLLERR;
	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
		pollflag = qib_poll_urgent(rcd, fp, pt);
	else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
		pollflag = qib_poll_next(rcd, fp, pt);
	else /* invalid */
		pollflag = EPOLLERR;

	return pollflag;
}

static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
	struct qib_filedata *fd = fp->private_data;
	const unsigned int weight = current->nr_cpus_allowed;
	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	int local_cpu;

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it on the local NUMA node.
	 */
	if ((weight >= qib_cpulist_count) &&
	    (cpumask_weight(local_mask) <= qib_cpulist_count)) {
		for_each_cpu(local_cpu, local_mask)
			if (!test_and_set_bit(local_cpu, qib_cpulist)) {
				fd->rec_cpu_num = local_cpu;
				return;
			}
	}

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it, as a rendezvous for all
	 * users of the driver.  If they don't actually later
	 * set affinity to this cpu, or set it to some other cpu,
	 * it just means that sooner or later we don't recommend
	 * a cpu, and let the scheduler do its best.
	 */
	if (weight >= qib_cpulist_count) {
		int cpu;

		cpu = find_first_zero_bit(qib_cpulist,
					  qib_cpulist_count);
		if (cpu == qib_cpulist_count)
			qib_dev_err(dd,
				"no cpus avail for affinity PID %u\n",
				current->pid);
		else {
			__set_bit(cpu, qib_cpulist);
			fd->rec_cpu_num = cpu;
		}
	}
}

/*
 * Check that userland and driver are compatible for subcontexts.
 */
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (QIB_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (QIB_USER_SWMAJOR == 1) {
		switch (QIB_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subctxt implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor <= QIB_USER_SWMINOR;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}
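
/*
 * Allocate the vmalloc'ed arrays master and slave processes share for
 * a context: per-subcontext user register pages, receive header
 * queues, and eager buffer copies.  A zero spu_subctxt_cnt or an
 * incompatible userland version simply skips the allocation, so the
 * context is used unshared.
 */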
static int init_subctxts(struct qib_devdata *dd,
			 struct qib_ctxtdata *rcd,
			 const struct qib_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subctxts;
	size_t size;

	/*
	 * If the user is requesting zero subctxts,
	 * skip the subctxt allocation.
	 */
	if (uinfo->spu_subctxt_cnt <= 0)
		goto bail;
	num_subctxts = uinfo->spu_subctxt_cnt;

	/* Check for subctxt compatibility */
	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
		uinfo->spu_userversion & 0xffff)) {
		qib_devinfo(dd->pcidev,
			 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
		goto bail;
	}
	if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
		ret = -EINVAL;
		goto bail;
	}

	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
	if (!rcd->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subctxts;
	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
	if (!rcd->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
					      rcd->rcvegrbuf_size *
					      num_subctxts);
	if (!rcd->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
	rcd->subctxt_id = uinfo->spu_subctxt_id;
	rcd->active_slaves = 1;
	rcd->redirect_seq_cnt = 1;
	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
	goto bail;

bail_rhdr:
	vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
	vfree(rcd->subctxt_uregbase);
	rcd->subctxt_uregbase = NULL;
bail:
	return ret;
}
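
/*
 * Create and initialize the ctxtdata for a newly assigned user
 * context: pick a NUMA node (honoring qib_numa_aware and any CPU
 * reserved by assign_ctxt_affinity()), allocate the TID page-list
 * scratch space used by qib_tid_update(), and hook the context into
 * the file's private data.
 */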
static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
		      struct file *fp, const struct qib_user_info *uinfo)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	void *ptmp = NULL;
	int ret;
	int numa_id;

	assign_ctxt_affinity(fp, dd);

	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
		cpu_to_node(fd->rec_cpu_num) :
		numa_node_id()) : dd->assigned_node_id;

	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);

	/*
	 * Allocate memory for use in qib_tid_update() at open to
	 * reduce cost of expected send setup per message segment
	 */
	if (rcd)
		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
			       dd->rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);

	if (!rcd || !ptmp) {
		qib_dev_err(dd,
			"Unable to allocate ctxtdata memory, failing open\n");
		ret = -ENOMEM;
		goto bailerr;
	}
	rcd->userversion = uinfo->spu_userversion;
	ret = init_subctxts(dd, rcd, uinfo);
	if (ret)
		goto bailerr;
	rcd->tid_pg_list = ptmp;
	rcd->pid = current->pid;
	init_waitqueue_head(&dd->rcd[ctxt]->wait);
	get_task_comm(rcd->comm, current);
	ctxt_fp(fp) = rcd;
	qib_stats.sps_ctxts++;
	dd->freectxts--;
	ret = 0;
	goto bail;

bailerr:
	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	dd->rcd[ctxt] = NULL;
	kfree(rcd);
	kfree(ptmp);
bail:
	return ret;
}

static inline int usable(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
		(ppd->lflags & QIBL_LINKACTIVE);
}

/*
 * Select a context on the given device, either using a requested port
 * or the port based on the context number.
 */
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
			    const struct qib_user_info *uinfo)
{
	struct qib_pportdata *ppd = NULL;
	int ret, ctxt;

	if (port) {
		if (!usable(dd->pport + port - 1)) {
			ret = -ENETDOWN;
			goto done;
		} else
			ppd = dd->pport + port - 1;
	}
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
	     ctxt++)
		;
	if (ctxt == dd->cfgctxts) {
		ret = -EBUSY;
		goto done;
	}
	if (!ppd) {
		u32 pidx = ctxt % dd->num_pports;

		if (usable(dd->pport + pidx))
			ppd = dd->pport + pidx;
		else {
			for (pidx = 0; pidx < dd->num_pports && !ppd;
			     pidx++)
				if (usable(dd->pport + pidx))
					ppd = dd->pport + pidx;
		}
	}
	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
	return ret;
}

static int find_free_ctxt(int unit, struct file *fp,
			  const struct qib_user_info *uinfo)
{
	struct qib_devdata *dd = qib_lookup(unit);
	int ret;

	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
		ret = -ENODEV;
	else
		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);

	return ret;
}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
		      unsigned alg)
{
	struct qib_devdata *udd = NULL;
	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
	u32 port = uinfo->spu_port, ctxt;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (nup == 0) {
		ret = -ENETDOWN;
		goto done;
	}

	if (alg == QIB_PORT_ALG_ACROSS) {
		unsigned inuse = ~0U;

		/* find device (with ACTIVE ports) with fewest ctxts in use */
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);
			unsigned cused = 0, cfree = 0, pusable = 0;

			if (!dd)
				continue;
			if (port && port <= dd->num_pports &&
			    usable(dd->pport + port - 1))
				pusable = 1;
			else
				for (i = 0; i < dd->num_pports; i++)
					if (usable(dd->pport + i))
						pusable++;
			if (!pusable)
				continue;
			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
			     ctxt++)
				if (dd->rcd[ctxt])
					cused++;
				else
					cfree++;
			if (cfree && cused < inuse) {
				udd = dd;
				inuse = cused;
			}
		}
		if (udd) {
			ret = choose_port_ctxt(fp, udd, port, uinfo);
			goto done;
		}
	} else {
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);

			if (dd) {
				ret = choose_port_ctxt(fp, dd, port, uinfo);
				if (!ret)
					goto done;
				if (ret == -EBUSY)
					dusable++;
			}
		}
	}
	ret = dusable ? -EBUSY : -ENETDOWN;

done:
	return ret;
}
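
/*
 * Look for an already-open master context whose subctxt_id matches
 * this process's request, and attach to it as the next slave
 * subcontext.  Returns 1 on success, 0 if no matching context exists,
 * or -EINVAL if the id matches but the context can't be shared with
 * this process (version or count mismatch, or already full).
 */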
static int find_shared_ctxt(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = qib_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);

		/* device portion of usable() */
		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!rcd || !rcd->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (rcd->subctxt_id != uinfo->spu_subctxt_id)
				continue;
			/* Verify the sharing process matches the master */
			if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
			    rcd->userversion != uinfo->spu_userversion ||
			    rcd->cnt >= rcd->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			ctxt_fp(fp) = rcd;
			subctxt_fp(fp) = rcd->cnt++;
			rcd->subpid[subctxt_fp(fp)] = current->pid;
			tidcursor_fp(fp) = 0;
			rcd->active_slaves |= 1 << subctxt_fp(fp);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int qib_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in qib_assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}

static int find_hca(unsigned int cpu, int *unit)
{
	int ret = 0, devmax, npresent, nup, ndev;

	*unit = -1;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);

		if (dd) {
			if (pcibus_to_node(dd->pcidev->bus) < 0) {
				ret = -EINVAL;
				goto done;
			}
			if (cpu_to_node(cpu) ==
				pcibus_to_node(dd->pcidev->bus)) {
				*unit = ndev;
				goto done;
			}
		}
	}
done:
	return ret;
}
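
/*
 * Create the per-file user SDMA queue, but only on hardware that has
 * send DMA; on chips without it, PIO is the only send path and no
 * queue is needed.
 */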
static int do_qib_user_sdma_queue_create(struct file *fp)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_ctxtdata *rcd = fd->rcd;
	struct qib_devdata *dd = rcd->dd;

	if (dd->flags & QIB_HAS_SEND_DMA) {

		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
						    dd->unit,
						    rcd->ctxt,
						    fd->subctxt);
		if (!fd->pq)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Get ctxt early, so can set affinity prior to memory allocation.
 */
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;

	/* Check to be sure we haven't already initialized this file */
	if (ctxt_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != QIB_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;

	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
		alg = uinfo->spu_port_alg;

	mutex_lock(&qib_mutex);

	if (qib_compatible_subctxts(swmajor, swminor) &&
	    uinfo->spu_subctxt_cnt) {
		ret = find_shared_ctxt(fp, uinfo);
		if (ret > 0) {
			ret = do_qib_user_sdma_queue_create(fp);
			if (!ret)
				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
			goto done_ok;
		}
	}

	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
	if (i_minor)
		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
	else {
		int unit;
		const unsigned int cpu = cpumask_first(current->cpus_ptr);
		const unsigned int weight = current->nr_cpus_allowed;

		if (weight == 1 && !test_bit(cpu, qib_cpulist))
			if (!find_hca(cpu, &unit) && unit >= 0)
				if (!find_free_ctxt(unit, fp, uinfo)) {
					ret = 0;
					goto done_chk_sdma;
				}
		ret = get_a_ctxt(fp, uinfo, alg);
	}

done_chk_sdma:
	if (!ret)
		ret = do_qib_user_sdma_queue_create(fp);
done_ok:
	mutex_unlock(&qib_mutex);

done:
	return ret;
}
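
/*
 * Second stage of user context initialization, run for the master
 * (subcontexts just wait here for the master to finish): carve out
 * this context's PIO buffer range, allocate the receive header queue
 * and eager buffers, and enable receive on the context.
 */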
static int qib_do_user_init(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int ret;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_devdata *dd;
	unsigned uctxt;

	/* Subctxts don't need to initialize anything since master did it. */
	if (subctxt_fp(fp)) {
		ret = wait_event_interruptible(rcd->wait,
			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
		goto bail;
	}

	dd = rcd->dd;

	/* some ctxts may get extra buffers, calculate that here */
	uctxt = rcd->ctxt - dd->first_user_ctxt;
	if (uctxt < dd->ctxts_extrabuf) {
		rcd->piocnt = dd->pbufsctxt + 1;
		rcd->pio_base = rcd->piocnt * uctxt;
	} else {
		rcd->piocnt = dd->pbufsctxt;
		rcd->pio_base = rcd->piocnt * uctxt +
			dd->ctxts_extrabuf;
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.  So check and handle.
	 */
	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
		if (rcd->pio_base >= dd->piobcnt2k) {
			qib_dev_err(dd,
				    "%u:ctxt%u: no 2KB buffers available\n",
				    dd->unit, rcd->ctxt);
			ret = -ENOBUFS;
			goto bail;
		}
		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
			    rcd->ctxt, rcd->piocnt);
	}

	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_USER, rcd);
	/*
	 * try to ensure that processes start up with consistent avail update
	 * for their own range, at least.  If the system is very quiet, it
	 * might have the in-memory copy out of date at startup for this
	 * range of buffers, when a context gets re-used.  Do after the
	 * chg_pioavail and before the rest of setup, so it's "almost
	 * certain" the dma will have occurred (can't 100% guarantee, but
	 * should be many decimals of 9s, with this ordering), given how
	 * much else happens after this.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If rcd->ctxt > chip-supported,
	 * we need to do extra stuff here to handle it by handling
	 * overflow through ctxt 0, someday
	 */
	ret = qib_create_rcvhdrq(dd, rcd);
	if (!ret)
		ret = qib_setup_eagerbufs(rcd);
	if (ret)
		goto bail_pio;

	rcd->tidcursor = 0; /* start at beginning after open */

	/* initialize poll variables... */
	rcd->urgent = 0;
	rcd->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when it changes (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (rcd->rcvhdrtail_kvaddr)
		qib_clear_rcvhdrtail(rcd);

	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
		      rcd->ctxt);

	/* Notify any waiting slaves */
	if (rcd->subctxt_cnt) {
		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
		wake_up(&rcd->wait);
	}
	return 0;

bail_pio:
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_KERN, rcd);
bail:
	return ret;
}

/**
 * unlock_expected_tids - unlock any expected TID entries context still had
 * in use
 * @rcd: ctxt
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using f_clear_tids.
 */
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;

	for (i = ctxt_tidbase; i < maxtid; i++) {
		struct page *p = dd->pageshadow[i];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		qib_release_user_pages(&p, 1);
		cnt++;
	}
}
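
/*
 * Release a user context on last close: drain the SDMA queue, free
 * any CPU reserved for affinity, and, when the last sharing process
 * is gone, disable receive, drop pkeys, disarm PIO buffers, unlock
 * expected TIDs, and free the context data.
 */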
static int qib_close(struct inode *in, struct file *fp)
{
	struct qib_filedata *fd;
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	unsigned long flags;
	unsigned ctxt;

	mutex_lock(&qib_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;
	if (!rcd) {
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	dd = rcd->dd;

	/* ensure all pio buffer writes in progress are flushed */
	qib_flush_wc();

	/* drain user sdma queue */
	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);
	}

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		/*
		 * XXX If the master closes the context before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	}
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		/* atomically clear receive enable ctxt and intr avail. */
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);

		/* clean up the pkeys for this ctxt user */
		qib_clean_part_key(rcd, dd);
		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)
			unlock_expected_tids(rcd);
		qib_stats.sps_ctxts--;
		dd->freectxts++;
	}

	mutex_unlock(&qib_mutex);
	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */

bail:
	kfree(fd);
	return 0;
}
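
/*
 * Answer QIB_CMD_CTXT_INFO: report which unit, port, context, and
 * subcontext this file ended up bound to, along with the recommended
 * CPU and the number of contexts and subcontexts available.
 */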
static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
	struct qib_ctxt_info info;
	int ret;
	size_t sz;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_filedata *fd;

	fd = fp->private_data;

	info.num_active = qib_count_active_units();
	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;
	info.subctxt = subctxt_fp(fp);
	/* Number of user ctxts available for this device. */
	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;
	sz = sizeof(info);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{
	const u32 val = qib_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{
	u32 val;
	int err;

	if (!pq)
		return -EINVAL;

	err = qib_user_sdma_make_progress(ppd, pq);
	if (err < 0)
		return err;

	val = qib_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
	int ret = 0;

	if (!usable(rcd->ppd)) {
		int i;
		/*
		 * if link is down, or otherwise not usable, delay
		 * the caller up to 30 seconds, so we don't thrash
		 * in trying to get the chip back to ACTIVE, and
		 * set flag so they make the call again.
		 */
		if (rcd->user_event_mask) {
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
			msleep(100);
		ret = -ENETDOWN;
	}
	return ret;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
	     ctxt++) {
		rcd = ppd->dd->rcd[ctxt];
		if (!rcd)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);

	return ret;
}

/*
 * clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}
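
/*
 * write() on the device fd is the command channel: userland sends a
 * struct qib_cmd whose type selects the operation and determines how
 * much of the command union to copy in; dispatch to the handlers
 * above happens in the second switch below.
 */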
/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 * The user process then performs actions appropriate to the bit having
 * been set, if desired, and checks again in the future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}
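/*
 * Illustrative sketch (hypothetical, not driver code) of the minimal
 * write()-based command that qib_write() below accepts: commands such
 * as QIB_CMD_DISARM_BUFS carry no payload (copy == 0 in the dispatch
 * below), so only the 32-bit type needs to cross the boundary:
 *
 *	__u32 type = QIB_CMD_DISARM_BUFS;
 *
 *	if (write(fd, &type, sizeof(type)) != sizeof(type))
 *		;	// e.g. -ENETDOWN while the link is unusable
 */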
static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{
	const struct qib_cmd __user *ucmd;
	struct qib_ctxtdata *rcd;
	const void __user *src;
	size_t consumed, copy = 0;
	struct qib_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (!ib_safe_file_access(fp)) {
		pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct qib_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
	case QIB_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;

	case QIB_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;

	case QIB_CMD_CTXT_INFO:
		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;
		src = &ucmd->cmd.ctxt_info;
		break;

	case QIB_CMD_TID_UPDATE:
	case QIB_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;

	case QIB_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;

	case QIB_CMD_DISARM_BUFS:
	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;

	case QIB_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;

	case QIB_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;

	case QIB_CMD_ACK_EVENT:
		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
		src = &ucmd->cmd.event_mask;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}
		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	rcd = ctxt_fp(fp);
	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
		if (rcd) {
			ret = -EINVAL;
			goto bail;
		}

		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;

	case QIB_CMD_USER_INIT:
		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = qib_get_base_info(fp, u64_to_user_ptr(
					cmd.cmd.user_info.spu_base_info),
					cmd.cmd.user_info.spu_base_info_size);
		break;

	case QIB_CMD_RECV_CTRL:
		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
		break;

	case QIB_CMD_CTXT_INFO:
		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
				    (unsigned long) cmd.cmd.ctxt_info);
		break;

	case QIB_CMD_TID_UPDATE:
		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
		break;

	case QIB_CMD_TID_FREE:
		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
		break;

	case QIB_CMD_SET_PART_KEY:
		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
		break;

	case QIB_CMD_DISARM_BUFS:
		(void)qib_disarm_piobufs_ifneeded(rcd);
		ret = disarm_req_delay(rcd);
		break;

	case QIB_CMD_PIOAVAILUPD:
		qib_force_pio_avail_update(rcd->dd);
		break;

	case QIB_CMD_POLL_TYPE:
		rcd->poll_type = cmd.cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);
		break;

	case QIB_CMD_SDMA_COMPLETE:
		ret = qib_sdma_get_complete(rcd->ppd,
					    user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);
		break;

	case QIB_CMD_ACK_EVENT:
		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!iter_is_iovec(from) || !from->nr_segs || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}
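/*
 * Hypothetical userspace sketch of the writev() path above: unlike
 * write(), which carries struct qib_cmd requests, the iovecs handed to
 * writev() describe user SDMA work for qib_user_sdma_writev() (the
 * packet layout is defined by the user SDMA code, not shown here; pkt
 * and pkt_len below are placeholders):
 *
 *	struct iovec iov = { .iov_base = pkt, .iov_len = pkt_len };
 *
 *	ssize_t sent = writev(fd, &iov, 1);	// hand work to SDMA queue
 */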
static struct class *qib_class;
static dev_t qib_dev;

int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
	struct cdev *cdev;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		pr_err("Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(qib_class, NULL, dev, NULL, "%s", name);
	if (!IS_ERR(device))
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
	pr_err("Could not create device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
err_cdev:
	cdev_del(cdev);
	cdev = NULL;
done:
	*cdevp = cdev;
	*devp = device;
	return ret;
}

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

int __init qib_dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		pr_err("Could not create device class (err %d)\n", -ret);
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
	}

done:
	return ret;
}

void qib_dev_cleanup(void)
{
	if (qib_class) {
		class_destroy(qib_class);
		qib_class = NULL;
	}

	unregister_chrdev_region(qib_dev, QIB_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int qib_user_add(struct qib_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
				    &wildcard_cdev, &wildcard_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
	if (ret)
		qib_user_remove(dd);
done:
	return ret;
}

/*
 * Create per-unit files in /dev
 */
int qib_device_create(struct qib_devdata *dd)
{
	int r, ret;

	r = qib_user_add(dd);
	ret = qib_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev.
 * Returns void; the core kernel reports no errors for this teardown.
 */
void qib_device_remove(struct qib_devdata *dd)
{
	qib_user_remove(dd);
	qib_diag_remove(dd);
}
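/*
 * Note on the resulting device node layout (assuming typical udev
 * handling of the "ipath" class devices created above): minor 0 is the
 * shared wildcard node and each unit gets its own node at minor
 * unit + 1, so userspace would plausibly open, e.g.:
 *
 *	int any = open("/dev/ipath", O_RDWR);	// wildcard node
 *	int one = open("/dev/ipath0", O_RDWR);	// unit 0 specifically
 */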