Lines matching refs: rcd

133 				   struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; in tid_rdma_opfn_init()
199 p->jkey = priv->rcd->jkey; in tid_rdma_opfn_init()
203 p->urg = is_urg_masked(priv->rcd); in tid_rdma_opfn_init()
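
The three matches at source lines 197-203 all derive OPFN negotiation parameters from the QP's receive context. A minimal sketch of tid_rdma_opfn_init() keeping only the rcd-derived assignments visible above; the `priv` local is assumed, and the elided lines 198-202 set non-rcd fields:

    void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
    {
        struct hfi1_qp_priv *priv = qp->priv;

        /* Advertise the KDETH QP prefix plus this QP's receive context. */
        p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
        /* Job key programmed into the context by hfi1_kern_exp_rcv_init(). */
        p->jkey = priv->rcd->jkey;
        /* ... non-rcd parameters at lines 198-202 elided in this listing ... */
        p->urg = is_urg_masked(priv->rcd);
    }
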
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) in hfi1_kern_exp_rcv_init() argument
305 rcd->jkey = TID_RDMA_JKEY; in hfi1_kern_exp_rcv_init()
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
307 return hfi1_alloc_ctxt_rcv_groups(rcd); in hfi1_kern_exp_rcv_init()
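
Source lines 298-307 show the per-context init path: program the TID RDMA job key and allocate the context's RcvArray groups. Reassembled from those fragments; the early return for `reinit` (lines 299-304 are elided in the listing) is an assumption:

    int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
    {
        /* Assumption: on re-init the jkey and groups already exist. */
        if (reinit)
            return 0;

        rcd->jkey = TID_RDMA_JKEY;
        /* Push the job key into the hardware receive context. */
        hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
        return hfi1_alloc_ctxt_rcv_groups(rcd);
    }
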
335 return dd->rcd[ctxt]; in qp_to_rcd()
344 qpriv->rcd = qp_to_rcd(rdi, qp); in hfi1_qp_priv_init()
369 struct hfi1_devdata *dd = qpriv->rcd->dd; in hfi1_qp_priv_init()
469 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd, in first_qp() argument
471 __must_hold(&rcd->exp_lock) in first_qp()
475 lockdep_assert_held(&rcd->exp_lock); in first_qp()
504 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd, in kernel_tid_waiters() argument
506 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in kernel_tid_waiters()
512 lockdep_assert_held(&rcd->exp_lock); in kernel_tid_waiters()
513 fqp = first_qp(rcd, queue); in kernel_tid_waiters()
537 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd, in dequeue_tid_waiter() argument
539 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in dequeue_tid_waiter()
544 lockdep_assert_held(&rcd->exp_lock); in dequeue_tid_waiter()
564 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd, in queue_qp_for_tid_wait() argument
566 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in queue_qp_for_tid_wait()
571 lockdep_assert_held(&rcd->exp_lock); in queue_qp_for_tid_wait()
576 rcd->dd->verbs_dev.n_tidwait++; in queue_qp_for_tid_wait()
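
The cluster at source lines 469-576 is the TID wait-queue discipline. Every helper carries __must_hold(&rcd->exp_lock) backed by a runtime lockdep_assert_held(), the QP-specific helpers additionally hold qp->s_lock, and enqueues bump a per-device counter. A comment-only summary of what the fragments imply:

    /*
     * TID wait-queue protocol implied by the fragments above:
     *
     *   first_qp(rcd, queue)           - peek at the head waiter.
     *   kernel_tid_waiters(rcd, q, qp) - true if another QP is queued
     *                                    ahead, i.e. resources are
     *                                    handed out FIFO.
     *   dequeue_tid_waiter(rcd, q, qp) - remove a QP that got its
     *                                    resources.
     *   queue_qp_for_tid_wait(...)     - park a QP; increments
     *                                    rcd->dd->verbs_dev.n_tidwait.
     *
     * All require rcd->exp_lock; the last three also require qp->s_lock.
     */
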
679 spin_lock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
686 spin_unlock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
694 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); in hfi1_tid_rdma_flush_wait()
695 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); in hfi1_tid_rdma_flush_wait()
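
Lines 679-695 are the teardown path: a QP is flushed from both per-context wait queues, with _tid_rdma_flush_wait() taking exp_lock itself (679/686). The outer wrapper follows directly from the two calls at 694-695; only the `priv` local is assumed:

    void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
    {
        struct hfi1_qp_priv *priv = qp->priv;

        /* Drop the QP from both TID resource queues of its context. */
        _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
        _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
    }
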
716 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last) in kern_reserve_flow() argument
717 __must_hold(&rcd->exp_lock) in kern_reserve_flow()
723 !test_and_set_bit(last, &rcd->flow_mask)) in kern_reserve_flow()
726 nr = ffz(rcd->flow_mask); in kern_reserve_flow()
728 (sizeof(rcd->flow_mask) * BITS_PER_BYTE)); in kern_reserve_flow()
731 set_bit(nr, &rcd->flow_mask); in kern_reserve_flow()
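
Source lines 716-731 contain essentially the whole flow-reservation algorithm: try to re-take the caller's previous index, else claim the first zero bit in rcd->flow_mask. A sketch under assumptions: the flow-count constant name (here RXE_NUM_TID_FLOWS), the range check on `last`, and the BUILD_BUG_ON wrapper around the size comparison at line 728 are inferred, not shown:

    static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
        __must_hold(&rcd->exp_lock)
    {
        int nr;

        /* Fast path: re-reserve the index this QP held last time. */
        if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
            !test_and_set_bit(last, &rcd->flow_mask))
            return last;

        /* Otherwise take the first free (zero) bit in the mask. */
        nr = ffz(rcd->flow_mask);
        BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
                     (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
        if (nr >= RXE_NUM_TID_FLOWS)
            return -EAGAIN; /* all flows busy; caller queues the QP */
        set_bit(nr, &rcd->flow_mask);
        return nr;
    }

Keeping the mask in a single unsigned long is what makes the compile-time size check at line 728 sufficient.
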
735 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation, in kern_set_hw_flow() argument
750 write_uctxt_csr(rcd->dd, rcd->ctxt, in kern_set_hw_flow()
754 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) in kern_setup_hw_flow() argument
755 __must_hold(&rcd->exp_lock) in kern_setup_hw_flow()
757 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
759 kern_set_hw_flow(rcd, generation, flow_idx); in kern_setup_hw_flow()
772 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) in kern_clear_hw_flow() argument
773 __must_hold(&rcd->exp_lock) in kern_clear_hw_flow()
775 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
776 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
777 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx); in kern_clear_hw_flow()
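
Lines 754-777 give both generation helpers nearly verbatim. kern_setup_hw_flow() programs the flow's current generation into the context CSR (through kern_set_hw_flow(), which ends in the write_uctxt_csr() at 750); kern_clear_hw_flow() advances the generation and parks the slot on KERN_GENERATION_RESERVED so packets from the retired generation are rejected. Sketch; the setup helper's return value is not visible in the listing and is assumed:

    static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
        __must_hold(&rcd->exp_lock)
    {
        u32 generation = rcd->flows[flow_idx].generation;

        kern_set_hw_flow(rcd, generation, flow_idx);
        return generation; /* assumed: callers at 805/4913 store this */
    }

    static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
        __must_hold(&rcd->exp_lock)
    {
        /* Retire this generation; the next owner gets a fresh one. */
        rcd->flows[flow_idx].generation =
            kern_flow_generation_next(rcd->flows[flow_idx].generation);
        kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
    }
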
780 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_setup_hw_flow() argument
792 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
793 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) in hfi1_kern_setup_hw_flow()
796 ret = kern_reserve_flow(rcd, fs->last_index); in hfi1_kern_setup_hw_flow()
804 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
805 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_kern_setup_hw_flow()
807 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
809 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_setup_hw_flow()
810 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
815 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
816 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
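
Lines 780-816 outline the public setup entry point: all decisions happen under exp_lock, FIFO fairness is enforced up front via kernel_tid_waiters(), and on success the next waiter is fetched before the lock drops so it can be woken afterwards. Reconstructed control flow; the index bookkeeping between lines 796 and 804, the wakeup helper after the unlock, and the -EAGAIN on the queue path are assumptions:

    int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
    {
        struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
        struct tid_flow_state *fs = &qpriv->flow_state;
        struct rvt_qp *fqp;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rcd->exp_lock, flags);
        /* FIFO fairness: anyone queued ahead of us goes first. */
        if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
            goto queue;

        ret = kern_reserve_flow(rcd, fs->last_index);
        if (ret < 0)
            goto queue;
        fs->index = ret;            /* assumed bookkeeping */
        fs->last_index = fs->index;

        /* Publish our generation, then program the hardware flow. */
        rcd->flows[fs->index].generation = fs->generation;
        fs->generation = kern_setup_hw_flow(rcd, fs->index);

        dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
        /* Grab the next waiter before dropping the lock... */
        fqp = first_qp(rcd, &rcd->flow_queue);
        spin_unlock_irqrestore(&rcd->exp_lock, flags);
        /* ...and wake it outside the lock (assumed helper). */
        tid_rdma_schedule_tid_wakeup(fqp);
        return 0;
    queue:
        queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
        spin_unlock_irqrestore(&rcd->exp_lock, flags);
        return -EAGAIN;
    }

The clear path at 820-838 mirrors this: release the flow under the same lock, then wake the head of flow_queue.
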
820 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_clear_hw_flow() argument
829 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
830 kern_clear_hw_flow(rcd, fs->index); in hfi1_kern_clear_hw_flow()
831 clear_bit(fs->index, &rcd->flow_mask); in hfi1_kern_clear_hw_flow()
837 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_clear_hw_flow()
838 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
848 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd) in hfi1_kern_init_ctxt_generations() argument
853 rcd->flows[i].generation = mask_generation(get_random_u32()); in hfi1_kern_init_ctxt_generations()
854 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i); in hfi1_kern_init_ctxt_generations()
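
Lines 848-854 seed each context's flow generations at init: a random, masked starting generation per flow, with the hardware slot parked on the reserved generation until a QP claims it. Sketch; the loop bound reuses the flow-count constant assumed earlier:

    void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
    {
        int i;

        for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
            /* Random start makes stale-generation hits improbable. */
            rcd->flows[i].generation =
                mask_generation(get_random_u32());
            /* Park the hardware flow until it is reserved. */
            kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
        }
    }
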
1128 dd = flow->req->rcd->dd; in dma_unmap_flow()
1144 struct hfi1_devdata *dd = flow->req->rcd->dd; in dma_map_flow()
1205 struct hfi1_ctxtdata *rcd, char *s, in kern_add_tid_node() argument
1213 dd_dev_err(rcd->dd, in kern_add_tid_node()
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_alloc_tids() local
1240 struct hfi1_devdata *dd = rcd->dd; in kern_alloc_tids()
1251 list_for_each_entry(group, &rcd->tid_group_list.list, list) { in kern_alloc_tids()
1252 kern_add_tid_node(flow, rcd, "complete groups", group, in kern_alloc_tids()
1265 list_for_each_entry(used, &rcd->tid_used_list.list, list) { in kern_alloc_tids()
1268 kern_add_tid_node(flow, rcd, "used groups", used, use); in kern_alloc_tids()
1280 if (group && &group->list == &rcd->tid_group_list.list) in kern_alloc_tids()
1282 group = list_prepare_entry(group, &rcd->tid_group_list.list, in kern_alloc_tids()
1284 if (list_is_last(&group->list, &rcd->tid_group_list.list)) in kern_alloc_tids()
1288 kern_add_tid_node(flow, rcd, "complete continue", group, use); in kern_alloc_tids()
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_program_rcv_group() local
1304 struct hfi1_devdata *dd = rcd->dd; in kern_program_rcv_group()
1328 rcventry -= rcd->expected_base; in kern_program_rcv_group()
1356 tid_group_move(grp, &rcd->tid_used_list, in kern_program_rcv_group()
1357 &rcd->tid_full_list); in kern_program_rcv_group()
1359 tid_group_move(grp, &rcd->tid_group_list, in kern_program_rcv_group()
1360 &rcd->tid_used_list); in kern_program_rcv_group()
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group() local
1371 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
1392 tid_group_move(grp, &rcd->tid_full_list, in kern_unprogram_rcv_group()
1393 &rcd->tid_used_list); in kern_unprogram_rcv_group()
1395 tid_group_move(grp, &rcd->tid_used_list, in kern_unprogram_rcv_group()
1396 &rcd->tid_group_list); in kern_unprogram_rcv_group()
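
The four tid_group_move() calls at 1356-1360 and 1392-1396 are exact mirror images, which pins down a three-list lifecycle for RcvArray groups:

    /*
     * RcvArray group lifecycle implied by the tid_group_move() pairs:
     *
     *   kern_program_rcv_group():
     *       tid_group_list -> tid_used_list   (group partially consumed)
     *       tid_used_list  -> tid_full_list   (every entry in use)
     *   kern_unprogram_rcv_group():
     *       tid_full_list  -> tid_used_list   (an entry was freed)
     *       tid_used_list  -> tid_group_list  (group fully free again)
     */
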
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group() local
1400 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
1466 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_setup() local
1494 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1530 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_setup()
1531 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1538 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1557 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_clear() local
1567 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1574 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_clear()
1575 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1640 req->rcd->numa_id); in hfi1_kern_exp_rcv_alloc_flows()
1670 req->rcd = qpriv->rcd; in hfi1_init_trdma_req()
1761 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_read_packet()
1811 hfi1_kern_clear_hw_flow(req->rcd, qp); in hfi1_build_tid_rdma_read_req()
1845 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) in hfi1_build_tid_rdma_read_req()
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; in tid_rdma_rcv_error() local
2007 rc_defered_ack(rcd, qp); in tid_rdma_rcv_error()
2230 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_req() local
2335 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_read_req()
2454 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_resp() local
2477 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_read_resp()
2536 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_read_resp()
2553 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2599 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_kern_read_tid_flow_free()
2629 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, in restart_tid_rdma_read_req() argument
2643 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in restart_tid_rdma_read_req()
2655 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, in handle_read_kdeth_eflags() argument
2660 struct hfi1_pportdata *ppd = rcd->ppd; in handle_read_kdeth_eflags()
2707 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2717 &rcd->qp_wait_list); in handle_read_kdeth_eflags()
2798 last_psn = read_r_next_psn(dd, rcd->ctxt, in handle_read_kdeth_eflags()
2807 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2845 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, in hfi1_handle_kdeth_eflags() argument
2876 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_handle_kdeth_eflags()
2924 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn, in hfi1_handle_kdeth_eflags()
2960 read_r_next_psn(dd, rcd->ctxt, in hfi1_handle_kdeth_eflags()
3178 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_qp_kern_exp_rcv_clear_all()
3289 struct hfi1_pportdata *ppd = qpriv->rcd->ppd; in setup_tid_rdma_wqe()
3433 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8; in hfi1_compute_tid_rnr_timeout()
3468 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_tid_write_alloc_resources() local
3500 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3526 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3533 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); in hfi1_tid_write_alloc_resources()
3537 &rcd->flow_queue); in hfi1_tid_write_alloc_resources()
3571 to_seg = position_in_queue(qpriv, &rcd->rarr_queue); in hfi1_tid_write_alloc_resources()
3641 rc_defered_ack(rcd, qp); in hfi1_tid_write_alloc_resources()
3659 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_req() local
3826 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_write_req()
3928 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_resp()
4003 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_tid_timeout()
4043 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_resp() local
4101 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_write_resp()
4273 struct hfi1_ctxtdata *rcd = priv->rcd; in hfi1_rc_rcv_tid_rdma_write_data() local
4297 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_write_data()
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_data()
4462 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_ack()
4874 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_rc_rcv_tid_rdma_resync() local
4903 spin_lock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
4912 rcd->flows[fs->index].generation = generation; in hfi1_rc_rcv_tid_rdma_resync()
4913 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_rc_rcv_tid_rdma_resync()
4968 spin_unlock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
5517 struct hfi1_ctxtdata *rcd, in update_r_next_psn_fecn() argument
5527 struct hfi1_devdata *dd = rcd->dd; in update_r_next_psn_fecn()
5530 read_r_next_psn(dd, rcd->ctxt, flow->idx); in update_r_next_psn_fecn()
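
Finally, lines 5517-5530 show the FECN repair path reading a flow's next expected PSN back from hardware. Sketch of the body; the guarding condition is an assumption, as the listing shows only the declarations and the read_r_next_psn() call:

    static void update_r_next_psn_fecn(struct hfi1_packet *packet,
                                       struct hfi1_qp_priv *priv,
                                       struct hfi1_ctxtdata *rcd,
                                       struct tid_rdma_flow *flow,
                                       bool fecn)
    {
        /*
         * Assumed guard: resync from hardware only when FECN forced
         * the packet through the eager path while software PSN
         * tracking (HFI1_R_TID_SW_PSN) is disabled.
         */
        if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
            !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
            struct hfi1_devdata *dd = rcd->dd;

            flow->flow_state.r_next_psn =
                read_r_next_psn(dd, rcd->ctxt, flow->idx);
        }
    }
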