Lines matching refs: dd (struct hfi1_devdata * — the per-device data pointer used throughout these send-context routines)

21 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)  in __cm_reset()  argument
23 write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK); in __cm_reset()
26 sendctrl = read_csr(dd, SEND_CTRL); in __cm_reset()
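
The two SEND_CTRL accesses above form a self-clearing-bit wait: the write sets the credit-management reset bit, and the read-back is repeated until the hardware clears it again. A minimal sketch of the pattern, using the driver's own write_csr()/read_csr() helpers; the udelay() pacing and the loop itself are assumptions about the elided lines:

    write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
    while (1) {
            udelay(1);      /* give the hardware time to act */
            sendctrl = read_csr(dd, SEND_CTRL);
            if (!(sendctrl & SEND_CTRL_CM_RESET_SMASK))
                    break;  /* reset bit cleared: reset is complete */
    }
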
33 void pio_send_control(struct hfi1_devdata *dd, int op) in pio_send_control() argument
41 spin_lock_irqsave(&dd->sendctrl_lock, flags); in pio_send_control()
43 reg = read_csr(dd, SEND_CTRL); in pio_send_control()
50 for (i = 0; i < ARRAY_SIZE(dd->vld); i++) in pio_send_control()
51 if (!dd->vld[i].mtu) in pio_send_control()
68 __cm_reset(dd, reg); in pio_send_control()
76 dd_dev_err(dd, "%s: invalid control %d\n", __func__, op); in pio_send_control()
81 write_csr(dd, SEND_CTRL, reg); in pio_send_control()
83 (void)read_csr(dd, SEND_CTRL); /* flush write */ in pio_send_control()
86 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in pio_send_control()
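
pio_send_control() shows the serialized read-modify-write discipline for a shared control CSR: every writer takes dd->sendctrl_lock with interrupts disabled, edits its bits in the value read from SEND_CTRL, writes the result back, and forces the posted write out with a dummy read before unlocking. A condensed sketch of that shape, with the per-op bit manipulation elided:

    unsigned long flags;
    u64 reg;

    spin_lock_irqsave(&dd->sendctrl_lock, flags);
    reg = read_csr(dd, SEND_CTRL);
    /* ... set or clear the bits this op asks for ... */
    write_csr(dd, SEND_CTRL, reg);
    (void)read_csr(dd, SEND_CTRL);  /* flush the posted write */
    spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
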
181 int init_sc_pools_and_sizes(struct hfi1_devdata *dd) in init_sc_pools_and_sizes() argument
184 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; in init_sc_pools_and_sizes()
233 dd, in init_sc_pools_and_sizes()
246 dd, in init_sc_pools_and_sizes()
254 dd, in init_sc_pools_and_sizes()
263 dd, in init_sc_pools_and_sizes()
289 count = dd->n_krcv_queues; in init_sc_pools_and_sizes()
293 count = dd->num_rcv_contexts - dd->n_krcv_queues; in init_sc_pools_and_sizes()
296 dd, in init_sc_pools_and_sizes()
301 if (total_contexts + count > chip_send_contexts(dd)) in init_sc_pools_and_sizes()
302 count = chip_send_contexts(dd) - total_contexts; in init_sc_pools_and_sizes()
319 dd, in init_sc_pools_and_sizes()
325 dd->sc_sizes[i].count = count; in init_sc_pools_and_sizes()
326 dd->sc_sizes[i].size = size; in init_sc_pools_and_sizes()
330 dd, in init_sc_pools_and_sizes()
340 dd, in init_sc_pools_and_sizes()
357 dd, in init_sc_pools_and_sizes()
366 dd, in init_sc_pools_and_sizes()
378 if (dd->sc_sizes[i].size < 0) { in init_sc_pools_and_sizes()
379 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes()
382 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
386 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) in init_sc_pools_and_sizes()
387 dd->sc_sizes[i].size = PIO_MAX_BLOCKS; in init_sc_pools_and_sizes()
390 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; in init_sc_pools_and_sizes()
394 dd_dev_info(dd, "unused send context blocks: %d\n", extra); in init_sc_pools_and_sizes()
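
The tail of init_sc_pools_and_sizes() turns wildcard sizes (negative values naming a memory pool) into concrete block counts, clamps each size to what the chip can address, and totals the blocks consumed so the leftover can be reported. A sketch of that final pass; the SC_MAX loop bound and the mem_pool_info[] table are assumptions about parts of the function the listing elides:

    int used_blocks = 0;
    int extra;

    for (i = 0; i < SC_MAX; i++) {
            if (dd->sc_sizes[i].size < 0) {
                    unsigned int pool = wildcard_to_pool(dd->sc_sizes[i].size);

                    dd->sc_sizes[i].size = mem_pool_info[pool].size;
            }
            /* the chip cannot address more than PIO_MAX_BLOCKS per context */
            if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
                    dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

            used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
    }
    extra = total_blocks - used_blocks;
    if (extra != 0)
            dd_dev_info(dd, "unused send context blocks: %d\n", extra);
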
399 int init_send_contexts(struct hfi1_devdata *dd) in init_send_contexts() argument
404 ret = init_credit_return(dd); in init_send_contexts()
408 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), in init_send_contexts()
410 dd->send_contexts = kcalloc(dd->num_send_contexts, in init_send_contexts()
413 if (!dd->send_contexts || !dd->hw_to_sw) { in init_send_contexts()
414 kfree(dd->hw_to_sw); in init_send_contexts()
415 kfree(dd->send_contexts); in init_send_contexts()
416 free_credit_return(dd); in init_send_contexts()
422 dd->hw_to_sw[i] = INVALID_SCI; in init_send_contexts()
431 struct sc_config_sizes *scs = &dd->sc_sizes[i]; in init_send_contexts()
435 &dd->send_contexts[context]; in init_send_contexts()
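
init_send_contexts() uses the usual allocate-then-unwind idiom: if either array allocation fails, everything acquired so far, including the credit-return DMA memory, is released before returning. Because kfree(NULL) is a no-op, a partial failure needs no special casing. A condensed sketch, assuming struct send_context_info as the element type of dd->send_contexts:

    ret = init_credit_return(dd);
    if (ret)
            return ret;

    dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
                                 GFP_KERNEL);
    dd->send_contexts = kcalloc(dd->num_send_contexts,
                                sizeof(struct send_context_info),
                                GFP_KERNEL);
    if (!dd->send_contexts || !dd->hw_to_sw) {
            kfree(dd->hw_to_sw);            /* NULL-safe */
            kfree(dd->send_contexts);       /* NULL-safe */
            free_credit_return(dd);
            return -ENOMEM;
    }
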
453 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index, in sc_hw_alloc() argument
460 for (index = 0, sci = &dd->send_contexts[0]; in sc_hw_alloc()
461 index < dd->num_send_contexts; index++, sci++) { in sc_hw_alloc()
465 context = chip_send_contexts(dd) - index - 1; in sc_hw_alloc()
466 dd->hw_to_sw[context] = index; in sc_hw_alloc()
472 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type); in sc_hw_alloc()
481 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) in sc_hw_free() argument
485 sci = &dd->send_contexts[sw_index]; in sc_hw_free()
487 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n", in sc_hw_free()
491 dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
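
sc_hw_alloc() and sc_hw_free() keep a two-way mapping between software indices and hardware contexts: hardware contexts are handed out from the top of the chip's range downward (chip_send_contexts(dd) - index - 1), and dd->hw_to_sw[] is the reverse map, with free slots holding INVALID_SCI. A sketch of the allocation side, assuming each send_contexts[] slot carries a type and an allocated flag:

    for (index = 0, sci = &dd->send_contexts[0];
         index < dd->num_send_contexts; index++, sci++) {
            if (sci->type == type && sci->allocated == 0) {
                    sci->allocated = 1;
                    /* 1:1 but non-identical: hw contexts come from the top */
                    context = chip_send_contexts(dd) - index - 1;
                    dd->hw_to_sw[context] = index;
                    *sw_index = index;
                    *hw_context = context;
                    return 0;
            }
    }
    dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
    return -ENOSPC;

sc_hw_free() reverses both halves: it clears the slot's allocated state and resets dd->hw_to_sw[hw_context] to INVALID_SCI, complaining if the slot was not actually allocated.
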
524 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
526 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
603 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
623 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity() local
627 write_kctxt_csr(dd, hw_context, in set_pio_integrity()
629 hfi1_pkt_default_send_ctxt_mask(dd, type)); in set_pio_integrity()
654 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, in sc_alloc() argument
669 if (dd->flags & HFI1_FROZEN) in sc_alloc()
679 dd_dev_err(dd, in sc_alloc()
685 spin_lock_irqsave(&dd->sc_lock, flags); in sc_alloc()
686 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context); in sc_alloc()
688 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
694 sci = &dd->send_contexts[sw_index]; in sc_alloc()
697 sc->dd = dd; in sc_alloc()
720 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
728 write_kctxt_csr(dd, hw_context, SC(CTRL), reg); in sc_alloc()
733 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
736 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), in sc_alloc()
751 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), in sc_alloc()
757 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg); in sc_alloc()
789 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg); in sc_alloc()
794 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg); in sc_alloc()
797 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
838 struct hfi1_devdata *dd; in sc_free() local
847 dd = sc->dd; in sc_free()
849 dd_dev_err(dd, "piowait list not empty!\n"); in sc_free()
855 spin_lock_irqsave(&dd->sc_lock, flags); in sc_free()
856 dd->send_contexts[sw_index].sc = NULL; in sc_free()
859 write_kctxt_csr(dd, hw_context, SC(CTRL), 0); in sc_free()
860 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0); in sc_free()
861 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0); in sc_free()
862 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0); in sc_free()
863 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0); in sc_free()
864 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0); in sc_free()
865 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0); in sc_free()
868 sc_hw_free(dd, sw_index, hw_context); in sc_free()
869 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_free()
888 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
892 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
949 static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) in is_sc_halted() argument
951 return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & in is_sc_halted()
971 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress() local
978 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
982 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
992 dd_dev_err(dd, in sc_wait_for_packet_egress()
996 queue_work(dd->pport->link_wq, in sc_wait_for_packet_egress()
997 &dd->pport->link_bounce_work); in sc_wait_for_packet_egress()
1006 pause_for_credit_return(dd); in sc_wait_for_packet_egress()
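
sc_wait_for_packet_egress() is a bounded poll: it reads the per-context egress status CSR until the engine drains or the context halts, and if nothing moves for too long it logs the state and queues link-bounce work to recover. A sketch of the loop's shape; the SCF_HALTED flag test, the simplified message, and the 500-iteration bound are illustrative stand-ins for the elided details:

    u32 loop = 0;
    u64 reg;

    while (1) {
            reg = read_csr(dd, sc->hw_context * 8 +
                           SEND_EGRESS_CTXT_STATUS);
            /* done if the context halted, in SW or HW, or egress is idle */
            if ((sc->flags & SCF_HALTED) ||
                is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
                    break;
            if (loop++ > 500) {     /* illustrative bound */
                    dd_dev_err(dd,
                               "%s: context %u(%u) not draining, bouncing link\n",
                               __func__, sc->sw_index, sc->hw_context);
                    queue_work(dd->pport->link_wq,
                               &dd->pport->link_bounce_work);
                    break;
            }
            udelay(1);
    }
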
1009 void sc_wait(struct hfi1_devdata *dd) in sc_wait() argument
1013 for (i = 0; i < dd->num_send_contexts; i++) { in sc_wait()
1014 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait()
1033 struct hfi1_devdata *dd = sc->dd; in sc_restart() local
1042 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1053 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1057 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", in sc_restart()
1083 dd_dev_err(dd, in sc_restart()
1118 void pio_freeze(struct hfi1_devdata *dd) in pio_freeze() argument
1123 for (i = 0; i < dd->num_send_contexts; i++) { in pio_freeze()
1124 sc = dd->send_contexts[i].sc; in pio_freeze()
1145 void pio_kernel_unfreeze(struct hfi1_devdata *dd) in pio_kernel_unfreeze() argument
1150 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_unfreeze()
1151 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1173 void pio_kernel_linkup(struct hfi1_devdata *dd) in pio_kernel_linkup() argument
1178 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_linkup()
1179 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1193 static int pio_init_wait_progress(struct hfi1_devdata *dd) in pio_init_wait_progress() argument
1199 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; in pio_init_wait_progress()
1201 reg = read_csr(dd, SEND_PIO_INIT_CTXT); in pio_init_wait_progress()
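
pio_init_wait_progress() polls SEND_PIO_INIT_CTXT until the hardware reports that context init has finished, allowing a much longer deadline under FPGA emulation (dd->icode == ICODE_FPGA_EMULATION), where the chip model runs far slower than silicon. A sketch of the poll; the in-progress and error field names are assumptions modeled on the CSR naming used elsewhere in the driver:

    int max, count = 0;
    u64 reg;

    /* emulation needs a much longer deadline than real silicon */
    max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
    while (1) {
            reg = read_csr(dd, SEND_PIO_INIT_CTXT);
            if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
                    break;                  /* init finished */
            if (count >= max)
                    return -ETIMEDOUT;
            udelay(5);
            count++;
    }
    return (reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK) ? -EIO : 0;
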
1217 void pio_reset_all(struct hfi1_devdata *dd) in pio_reset_all() argument
1222 ret = pio_init_wait_progress(dd); in pio_reset_all()
1226 write_csr(dd, SEND_PIO_ERR_CLEAR, in pio_reset_all()
1231 write_csr(dd, SEND_PIO_INIT_CTXT, in pio_reset_all()
1234 ret = pio_init_wait_progress(dd); in pio_reset_all()
1236 dd_dev_err(dd, in pio_reset_all()
1246 struct hfi1_devdata *dd; in sc_enable() local
1252 dd = sc->dd; in sc_enable()
1262 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1285 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1287 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1293 spin_lock(&dd->sc_init_lock); in sc_enable()
1304 write_csr(dd, SEND_PIO_INIT_CTXT, pio); in sc_enable()
1310 ret = pio_init_wait_progress(dd); in sc_enable()
1311 spin_unlock(&dd->sc_init_lock); in sc_enable()
1313 dd_dev_err(dd, in sc_enable()
1323 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1328 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1344 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1350 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1352 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
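
sc_return_credits() pulses the per-context CREDIT_FORCE bit: the 0-to-1 transition is what schedules a credit return, the read-back flushes the posted write so the transition actually lands, and the final write clears the bit so the next call produces a fresh edge. A minimal sketch, assuming the force-return mask macro follows the SC() naming seen above:

    /* a 0 -> 1 transition schedules a credit return */
    write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
                    SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
    /* flush: make sure the transition reaches the chip */
    read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
    /* put it back to 0 so the next force is another 0 -> 1 edge */
    write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
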
1370 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1518 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1540 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1571 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail() local
1579 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1580 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1718 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) in sc_group_release_update() argument
1724 spin_lock(&dd->sc_lock); in sc_group_release_update()
1725 sw_index = dd->hw_to_sw[hw_context]; in sc_group_release_update()
1726 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1727 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", in sc_group_release_update()
1731 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1738 sw_index = dd->hw_to_sw[gc]; in sc_group_release_update()
1739 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1740 dd_dev_err(dd, in sc_group_release_update()
1745 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1748 spin_unlock(&dd->sc_lock); in sc_group_release_update()
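
sc_group_release_update() funnels a hardware interrupt back to software state: under dd->sc_lock it translates the hardware context through dd->hw_to_sw[], rejects out-of-range mappings, and then walks every context in the same credit-return group. A sketch of that walk; group_first() and group_count() are hypothetical helpers standing in for the driver's group arithmetic:

    spin_lock(&dd->sc_lock);
    sw_index = dd->hw_to_sw[hw_context];
    if (unlikely(sw_index >= dd->num_send_contexts)) {
            dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
                       __func__, hw_context, sw_index);
            goto done;
    }
    sc = dd->send_contexts[sw_index].sc;
    if (!sc)
            goto done;

    /* group_first()/group_count() are hypothetical group helpers */
    gc = group_first(hw_context, sc->group);
    gc_end = gc + group_count(sc->group);
    for (; gc < gc_end; gc++) {
            sw_index = dd->hw_to_sw[gc];
            if (unlikely(sw_index >= dd->num_send_contexts)) {
                    dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
                               __func__, hw_context, sw_index);
                    continue;
            }
            sc_release_update(dd->send_contexts[sw_index].sc);
    }
    done:
            spin_unlock(&dd->sc_lock);
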
1760 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, in pio_select_send_context_vl() argument
1778 m = rcu_dereference(dd->pio_map); in pio_select_send_context_vl()
1781 return dd->vld[0].sc; in pio_select_send_context_vl()
1788 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
1800 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, in pio_select_send_context_sc() argument
1803 u8 vl = sc_to_vlt(dd, sc5); in pio_select_send_context_sc()
1805 return pio_select_send_context_vl(dd, selector, vl); in pio_select_send_context_sc()
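
pio_select_send_context_vl() is the RCU read side of the pio map: the pointer is fetched with rcu_dereference(), and a missing map, or an empty slot inside it, falls back to the VL0 kernel context (dd->vld[0].sc). pio_select_send_context_sc() merely translates an SC to a VL first. A sketch of the reader, assuming the rcu_read_lock()/rcu_read_unlock() bracketing that rcu_dereference() requires, and struct pio_vl_map as the map type:

    struct pio_vl_map *m;
    struct send_context *rval = NULL;

    rcu_read_lock();
    m = rcu_dereference(dd->pio_map);
    if (unlikely(!m)) {
            rcu_read_unlock();
            return dd->vld[0].sc;   /* no map yet: use the VL0 context */
    }
    /* ... index m by vl, then hash the selector into its context list ... */
    rval = !rval ? dd->vld[0].sc : rval;    /* hole in the map: fall back */
    rcu_read_unlock();
    return rval;
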
1833 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) in set_threshold() argument
1837 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1839 sc_mtu_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1840 dd->vld[i].mtu, in set_threshold()
1841 dd->rcd[0]->rcvhdrqentsize)); in set_threshold()
1842 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); in set_threshold()
1873 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) in pio_map_init() argument
1883 for (i = 0; i < dd->num_send_contexts; i++) in pio_map_init()
1884 if (dd->send_contexts[i].type == SC_KERNEL) in pio_map_init()
1922 if (dd->kernel_send_context[scontext]) { in pio_map_init()
1924 dd->kernel_send_context[scontext]; in pio_map_init()
1925 set_threshold(dd, scontext, i); in pio_map_init()
1939 spin_lock_irq(&dd->pio_map_lock); in pio_map_init()
1940 oldmap = rcu_dereference_protected(dd->pio_map, in pio_map_init()
1941 lockdep_is_held(&dd->pio_map_lock)); in pio_map_init()
1944 rcu_assign_pointer(dd->pio_map, newmap); in pio_map_init()
1946 spin_unlock_irq(&dd->pio_map_lock); in pio_map_init()
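
pio_map_init() is the matching RCU update side: the new map is built off to the side, published with rcu_assign_pointer() while holding pio_map_lock, and the old map is freed only after readers can no longer hold a reference. A sketch of the publish step; freeing after synchronize_rcu() is a simplification here — deferring the free with call_rcu() would work equally well:

    spin_lock_irq(&dd->pio_map_lock);
    oldmap = rcu_dereference_protected(dd->pio_map,
                                       lockdep_is_held(&dd->pio_map_lock));
    rcu_assign_pointer(dd->pio_map, newmap);        /* publish */
    spin_unlock_irq(&dd->pio_map_lock);

    if (oldmap) {
            synchronize_rcu();      /* wait for pre-existing readers */
            pio_map_free(oldmap);
    }
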
1957 void free_pio_map(struct hfi1_devdata *dd) in free_pio_map() argument
1960 if (rcu_access_pointer(dd->pio_map)) { in free_pio_map()
1961 spin_lock_irq(&dd->pio_map_lock); in free_pio_map()
1962 pio_map_free(rcu_access_pointer(dd->pio_map)); in free_pio_map()
1963 RCU_INIT_POINTER(dd->pio_map, NULL); in free_pio_map()
1964 spin_unlock_irq(&dd->pio_map_lock); in free_pio_map()
1967 kfree(dd->kernel_send_context); in free_pio_map()
1968 dd->kernel_send_context = NULL; in free_pio_map()
1971 int init_pervl_scs(struct hfi1_devdata *dd) in init_pervl_scs() argument
1977 struct hfi1_pportdata *ppd = dd->pport; in init_pervl_scs()
1979 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
1980 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
1981 if (!dd->vld[15].sc) in init_pervl_scs()
1984 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
1985 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); in init_pervl_scs()
1987 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, in init_pervl_scs()
1989 GFP_KERNEL, dd->node); in init_pervl_scs()
1990 if (!dd->kernel_send_context) in init_pervl_scs()
1993 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2003 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2004 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2005 if (!dd->vld[i].sc) in init_pervl_scs()
2007 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2008 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2010 dd->vld[i].mtu = hfi1_max_mtu; in init_pervl_scs()
2013 dd->kernel_send_context[i + 1] = in init_pervl_scs()
2014 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2015 if (!dd->kernel_send_context[i + 1]) in init_pervl_scs()
2017 hfi1_init_ctxt(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2020 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2021 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2023 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2024 dd_dev_info(dd, in init_pervl_scs()
2026 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2029 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2030 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2032 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2035 sc_enable(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2036 ctxt = dd->kernel_send_context[i + 1]->hw_context; in init_pervl_scs()
2038 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2041 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) in init_pervl_scs()
2047 sc_free(dd->vld[i].sc); in init_pervl_scs()
2048 dd->vld[i].sc = NULL; in init_pervl_scs()
2052 sc_free(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2054 kfree(dd->kernel_send_context); in init_pervl_scs()
2055 dd->kernel_send_context = NULL; in init_pervl_scs()
2058 sc_free(dd->vld[15].sc); in init_pervl_scs()
2062 int init_credit_return(struct hfi1_devdata *dd) in init_credit_return() argument
2067 dd->cr_base = kcalloc( in init_credit_return()
2071 if (!dd->cr_base) { in init_credit_return()
2078 set_dev_node(&dd->pcidev->dev, i); in init_credit_return()
2079 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, in init_credit_return()
2081 &dd->cr_base[i].dma, in init_credit_return()
2083 if (!dd->cr_base[i].va) { in init_credit_return()
2084 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2085 dd_dev_err(dd, in init_credit_return()
2092 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2099 free_credit_return(dd); in init_credit_return()
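
init_credit_return() allocates one credit-return buffer per NUMA node that can host a send context, temporarily retargeting the PCI device with set_dev_node() so each dma_alloc_coherent() lands on the right node, and restoring dd->node whether the loop succeeds or fails. A condensed sketch; num_numa and bytes stand in for locals the listing elides:

    for (i = 0; i < num_numa; i++) {
            set_dev_node(&dd->pcidev->dev, i);      /* steer to node i */
            dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
                                                   bytes,
                                                   &dd->cr_base[i].dma,
                                                   GFP_KERNEL);
            if (!dd->cr_base[i].va) {
                    set_dev_node(&dd->pcidev->dev, dd->node);
                    dd_dev_err(dd,
                               "Unable to allocate credit return DMA range for NUMA %d\n",
                               i);
                    ret = -ENOMEM;
                    goto done;
            }
    }
    /* put the device's node affinity back the way we found it */
    set_dev_node(&dd->pcidev->dev, dd->node);

free_credit_return() below is the mirror image: it walks the same per-node array, dma_free_coherent()s every non-NULL entry, then frees and NULLs dd->cr_base itself.
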
2103 void free_credit_return(struct hfi1_devdata *dd) in free_credit_return() argument
2107 if (!dd->cr_base) in free_credit_return()
2110 if (dd->cr_base[i].va) { in free_credit_return()
2111 dma_free_coherent(&dd->pcidev->dev, in free_credit_return()
2114 dd->cr_base[i].va, in free_credit_return()
2115 dd->cr_base[i].dma); in free_credit_return()
2118 kfree(dd->cr_base); in free_credit_return()
2119 dd->cr_base = NULL; in free_credit_return()
2138 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()