Lines matching refs: dev

188 static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,  in mthca_alloc_mtt_range()  argument
196 if (mthca_is_memfree(dev)) in mthca_alloc_mtt_range()
197 if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, in mthca_alloc_mtt_range()
206 static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, in __mthca_alloc_mtt() argument
221 for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1) in __mthca_alloc_mtt()
224 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); in __mthca_alloc_mtt()
233 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) in mthca_alloc_mtt() argument
235 return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); in mthca_alloc_mtt()
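
The allocation path above sizes an MTT in power-of-two groups of segments: each segment holds mtt_seg_size / 8 64-bit entries, and the loop at line 221 doubles the covered size until it reaches the request, yielding the buddy order passed to mthca_alloc_mtt_range(). A minimal sketch of that order computation, with an illustrative helper name:

static int example_mtt_order(int size, int mtt_seg_size)
{
	int order = 0;
	int i;

	/* One segment holds mtt_seg_size / 8 64-bit MTT entries; keep
	 * doubling the covered size until it reaches the request,
	 * counting the doublings as the buddy order. */
	for (i = mtt_seg_size / 8; i < size; i <<= 1)
		++order;

	return order;
}
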
238 void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) in mthca_free_mtt() argument
245 mthca_table_put_range(dev, dev->mr_table.mtt_table, in mthca_free_mtt()
252 static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, in __mthca_write_mtt() argument
260 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); in __mthca_write_mtt()
266 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + in __mthca_write_mtt()
267 mtt->first_seg * dev->limits.mtt_seg_size + in __mthca_write_mtt()
281 err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1); in __mthca_write_mtt()
283 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); in __mthca_write_mtt()
293 mthca_free_mailbox(dev, mailbox); in __mthca_write_mtt()
297 int mthca_write_mtt_size(struct mthca_dev *dev) in mthca_write_mtt_size() argument
299 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || in mthca_write_mtt_size()
300 !(dev->mthca_flags & MTHCA_FLAG_FMR)) in mthca_write_mtt_size()
310 return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff; in mthca_write_mtt_size()
313 static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, in mthca_tavor_write_mtt_seg() argument
320 mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + in mthca_tavor_write_mtt_seg()
327 static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, in mthca_arbel_write_mtt_seg() argument
339 BUG_ON(s % dev->limits.mtt_seg_size); in mthca_arbel_write_mtt_seg()
341 mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg + in mthca_arbel_write_mtt_seg()
342 s / dev->limits.mtt_seg_size, &dma_handle); in mthca_arbel_write_mtt_seg()
346 dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, in mthca_arbel_write_mtt_seg()
352 dma_sync_single_for_device(&dev->pdev->dev, dma_handle, in mthca_arbel_write_mtt_seg()
356 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, in mthca_write_mtt() argument
359 int size = mthca_write_mtt_size(dev); in mthca_write_mtt()
362 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || in mthca_write_mtt()
363 !(dev->mthca_flags & MTHCA_FLAG_FMR)) in mthca_write_mtt()
364 return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); in mthca_write_mtt()
368 if (mthca_is_memfree(dev)) in mthca_write_mtt()
369 mthca_arbel_write_mtt_seg(dev, mtt, start_index, in mthca_write_mtt()
372 mthca_tavor_write_mtt_seg(dev, mtt, start_index, in mthca_write_mtt()
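
mthca_write_mtt() above picks between two paths: without direct FMR access to the MTT table it falls back to the WRITE_MTT mailbox command via __mthca_write_mtt(); otherwise it writes the segments directly, split into chunks no larger than mthca_write_mtt_size() and dispatched to the Arbel (mem-free) or Tavor writer. A sketch of that dispatch, assuming the driver's internal headers; the chunk bookkeeping shown here is an assumption:

static int example_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
			     int start_index, u64 *buffer_list, int list_len)
{
	int size = mthca_write_mtt_size(dev);
	int chunk;

	/* No direct FMR access to the MTT table: use the mailbox path. */
	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		return __mthca_write_mtt(dev, mtt, start_index,
					 buffer_list, list_len);

	while (list_len > 0) {
		chunk = min(size, list_len);
		if (mthca_is_memfree(dev))	/* Arbel: MTTs live in ICM */
			mthca_arbel_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);
		else				/* Tavor: MTTs in the DDR window */
			mthca_tavor_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);

		list_len    -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}

	return 0;
}
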
403 static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind) in hw_index_to_key() argument
405 if (mthca_is_memfree(dev)) in hw_index_to_key()
411 static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) in key_to_hw_index() argument
413 if (mthca_is_memfree(dev)) in key_to_hw_index()
419 static inline u32 adjust_key(struct mthca_dev *dev, u32 key) in adjust_key() argument
421 if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) in adjust_key()
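
hw_index_to_key() and key_to_hw_index() above translate between the MPT table index used by the hardware and the lkey/rkey handed back to consumers, while adjust_key() further remaps keys when the Sinai throughput optimization (MTHCA_FLAG_SINAI_OPT) is active. An illustrative-only sketch of an inverse pair of mappings of this kind, assuming a one-byte rotation on mem-free HCAs; the driver's own constants are authoritative:

#include <linux/types.h>

static inline u32 example_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);	/* rotate left by one byte */
}

static inline u32 example_key_to_index(u32 key)
{
	return (key << 24) | (key >> 8);	/* inverse rotation */
}
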
427 int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, in mthca_mr_alloc() argument
438 key = mthca_alloc(&dev->mr_table.mpt_alloc); in mthca_mr_alloc()
441 key = adjust_key(dev, key); in mthca_mr_alloc()
442 mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); in mthca_mr_alloc()
444 if (mthca_is_memfree(dev)) { in mthca_mr_alloc()
445 err = mthca_table_get(dev, dev->mr_table.mpt_table, key); in mthca_mr_alloc()
450 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); in mthca_mr_alloc()
474 cpu_to_be64(dev->mr_table.mtt_base + in mthca_mr_alloc()
475 mr->mtt->first_seg * dev->limits.mtt_seg_size); in mthca_mr_alloc()
478 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); in mthca_mr_alloc()
488 err = mthca_SW2HW_MPT(dev, mailbox, in mthca_mr_alloc()
489 key & (dev->limits.num_mpts - 1)); in mthca_mr_alloc()
491 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); in mthca_mr_alloc()
495 mthca_free_mailbox(dev, mailbox); in mthca_mr_alloc()
499 mthca_free_mailbox(dev, mailbox); in mthca_mr_alloc()
502 mthca_table_put(dev, dev->mr_table.mpt_table, key); in mthca_mr_alloc()
505 mthca_free(&dev->mr_table.mpt_alloc, key); in mthca_mr_alloc()
509 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, in mthca_mr_alloc_notrans() argument
513 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); in mthca_mr_alloc_notrans()
516 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, in mthca_mr_alloc_phys() argument
523 mr->mtt = mthca_alloc_mtt(dev, list_len); in mthca_mr_alloc_phys()
527 err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); in mthca_mr_alloc_phys()
529 mthca_free_mtt(dev, mr->mtt); in mthca_mr_alloc_phys()
533 err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, in mthca_mr_alloc_phys()
536 mthca_free_mtt(dev, mr->mtt); in mthca_mr_alloc_phys()
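
mthca_mr_alloc_phys() above builds a physical MR in three steps and unwinds on failure: allocate the MTT, write the buffer list into it, then create the MPT entry via mthca_mr_alloc(); if either of the last two steps fails, the MTT is freed. A sketch of that sequence, assuming the driver's internal headers; the ERR_PTR handling shown is an assumption:

static int example_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
				 u64 *buffer_list, int list_len,
				 int buffer_size_shift, u64 iova,
				 u64 total_size, u32 access,
				 struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err)
		goto err_free_mtt;

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		goto err_free_mtt;

	return 0;

err_free_mtt:
	mthca_free_mtt(dev, mr->mtt);
	return err;
}
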
542 static void mthca_free_region(struct mthca_dev *dev, u32 lkey) in mthca_free_region() argument
544 mthca_table_put(dev, dev->mr_table.mpt_table, in mthca_free_region()
545 key_to_hw_index(dev, lkey)); in mthca_free_region()
547 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); in mthca_free_region()
550 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) in mthca_free_mr() argument
554 err = mthca_HW2SW_MPT(dev, NULL, in mthca_free_mr()
555 key_to_hw_index(dev, mr->ibmr.lkey) & in mthca_free_mr()
556 (dev->limits.num_mpts - 1)); in mthca_free_mr()
558 mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); in mthca_free_mr()
560 mthca_free_region(dev, mr->ibmr.lkey); in mthca_free_mr()
561 mthca_free_mtt(dev, mr->mtt); in mthca_free_mr()
564 int mthca_init_mr_table(struct mthca_dev *dev) in mthca_init_mr_table() argument
569 err = mthca_alloc_init(&dev->mr_table.mpt_alloc, in mthca_init_mr_table()
570 dev->limits.num_mpts, in mthca_init_mr_table()
571 ~0, dev->limits.reserved_mrws); in mthca_init_mr_table()
575 if (!mthca_is_memfree(dev) && in mthca_init_mr_table()
576 (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) in mthca_init_mr_table()
577 dev->limits.fmr_reserved_mtts = 0; in mthca_init_mr_table()
579 dev->mthca_flags |= MTHCA_FLAG_FMR; in mthca_init_mr_table()
581 if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) in mthca_init_mr_table()
582 mthca_dbg(dev, "Memory key throughput optimization activated.\n"); in mthca_init_mr_table()
584 err = mthca_buddy_init(&dev->mr_table.mtt_buddy, in mthca_init_mr_table()
585 fls(dev->limits.num_mtt_segs - 1)); in mthca_init_mr_table()
590 dev->mr_table.tavor_fmr.mpt_base = NULL; in mthca_init_mr_table()
591 dev->mr_table.tavor_fmr.mtt_base = NULL; in mthca_init_mr_table()
593 if (dev->limits.fmr_reserved_mtts) { in mthca_init_mr_table()
594 i = fls(dev->limits.fmr_reserved_mtts - 1); in mthca_init_mr_table()
597 mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n"); in mthca_init_mr_table()
603 mtts = dev->limits.num_mtt_segs; in mthca_init_mr_table()
604 mpts = dev->limits.num_mpts; in mthca_init_mr_table()
607 if (!mthca_is_memfree(dev) && in mthca_init_mr_table()
608 (dev->mthca_flags & MTHCA_FLAG_FMR)) { in mthca_init_mr_table()
610 addr = pci_resource_start(dev->pdev, 4) + in mthca_init_mr_table()
611 ((pci_resource_len(dev->pdev, 4) - 1) & in mthca_init_mr_table()
612 dev->mr_table.mpt_base); in mthca_init_mr_table()
614 dev->mr_table.tavor_fmr.mpt_base = in mthca_init_mr_table()
617 if (!dev->mr_table.tavor_fmr.mpt_base) { in mthca_init_mr_table()
618 mthca_warn(dev, "MPT ioremap for FMR failed.\n"); in mthca_init_mr_table()
623 addr = pci_resource_start(dev->pdev, 4) + in mthca_init_mr_table()
624 ((pci_resource_len(dev->pdev, 4) - 1) & in mthca_init_mr_table()
625 dev->mr_table.mtt_base); in mthca_init_mr_table()
627 dev->mr_table.tavor_fmr.mtt_base = in mthca_init_mr_table()
628 ioremap(addr, mtts * dev->limits.mtt_seg_size); in mthca_init_mr_table()
629 if (!dev->mr_table.tavor_fmr.mtt_base) { in mthca_init_mr_table()
630 mthca_warn(dev, "MTT ioremap for FMR failed.\n"); in mthca_init_mr_table()
636 if (dev->limits.fmr_reserved_mtts) { in mthca_init_mr_table()
637 err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1)); in mthca_init_mr_table()
642 err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1)); in mthca_init_mr_table()
646 dev->mr_table.fmr_mtt_buddy = in mthca_init_mr_table()
647 &dev->mr_table.tavor_fmr.mtt_buddy; in mthca_init_mr_table()
649 dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy; in mthca_init_mr_table()
652 if (dev->limits.reserved_mtts) { in mthca_init_mr_table()
653 i = fls(dev->limits.reserved_mtts - 1); in mthca_init_mr_table()
655 if (mthca_alloc_mtt_range(dev, i, in mthca_init_mr_table()
656 dev->mr_table.fmr_mtt_buddy) == -1) { in mthca_init_mr_table()
657 mthca_warn(dev, "MTT table of order %d is too small.\n", in mthca_init_mr_table()
658 dev->mr_table.fmr_mtt_buddy->max_order); in mthca_init_mr_table()
668 if (dev->limits.fmr_reserved_mtts) in mthca_init_mr_table()
669 mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); in mthca_init_mr_table()
672 if (dev->mr_table.tavor_fmr.mtt_base) in mthca_init_mr_table()
673 iounmap(dev->mr_table.tavor_fmr.mtt_base); in mthca_init_mr_table()
676 if (dev->mr_table.tavor_fmr.mpt_base) in mthca_init_mr_table()
677 iounmap(dev->mr_table.tavor_fmr.mpt_base); in mthca_init_mr_table()
680 mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); in mthca_init_mr_table()
683 mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); in mthca_init_mr_table()
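
In mthca_init_mr_table() above, the Tavor FMR path maps the MPT and MTT tables for direct CPU access: mpt_base/mtt_base are device addresses, and masking them with (pci_resource_len(..., 4) - 1) converts them to an offset inside PCI BAR 4 before ioremap(). A sketch of that mapping step, with an illustrative helper name; treating BAR 4 as the window holding these tables is an assumption drawn from the lines above:

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *example_map_fmr_table(struct pci_dev *pdev,
					   u64 table_base, size_t size)
{
	/* Device address masked down to an offset within BAR 4. */
	phys_addr_t addr = pci_resource_start(pdev, 4) +
		((pci_resource_len(pdev, 4) - 1) & table_base);

	return ioremap(addr, size);	/* NULL on failure, as checked above */
}
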
688 void mthca_cleanup_mr_table(struct mthca_dev *dev) in mthca_cleanup_mr_table() argument
691 if (dev->limits.fmr_reserved_mtts) in mthca_cleanup_mr_table()
692 mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); in mthca_cleanup_mr_table()
694 mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); in mthca_cleanup_mr_table()
696 if (dev->mr_table.tavor_fmr.mtt_base) in mthca_cleanup_mr_table()
697 iounmap(dev->mr_table.tavor_fmr.mtt_base); in mthca_cleanup_mr_table()
698 if (dev->mr_table.tavor_fmr.mpt_base) in mthca_cleanup_mr_table()
699 iounmap(dev->mr_table.tavor_fmr.mpt_base); in mthca_cleanup_mr_table()
701 mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); in mthca_cleanup_mr_table()