/openbmc/linux/drivers/infiniband/hw/hfi1/

init.c
    179 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
    182 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
    238 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
    291 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
    337 rcd->do_interrupt = rcd->slow_handler; in hfi1_create_ctxtdata()
    404 rcd->ctxt, rcd->egrbufs.count); in hfi1_create_ctxtdata()
    436 rcd->ctxt, rcd->egrbufs.size); in hfi1_create_ctxtdata()
    1095 rcd->rcvhdrq, rcd->rcvhdrq_dma); in hfi1_free_ctxtdata()
    1788 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
    1924 rcd->ctxt, rcd->egrbufs.alloced, in hfi1_setup_eagerbufs()
    [all …]

aspm.c
    172 struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer); in aspm_ctx_timer_function() local
    176 aspm_enable_dec(rcd->dd); in aspm_ctx_timer_function()
    193 if (rcd) { in aspm_disable_all()
    198 hfi1_rcd_put(rcd); in aspm_disable_all()
    220 if (rcd) { in aspm_enable_all()
    225 hfi1_rcd_put(rcd); in aspm_enable_all()
    234 rcd->aspm_intr_supported = rcd->dd->aspm_supported && in aspm_ctx_init()
    236 rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt; in aspm_ctx_init()
    249 if (rcd) in aspm_init()
    250 aspm_ctx_init(rcd); in aspm_init()
    [all …]

exp_rcv.c
    26 hfi1_exp_tid_set_init(&rcd->tid_used_list); in hfi1_exp_tid_group_init()
    36 struct hfi1_devdata *dd = rcd->dd; in hfi1_alloc_ctxt_rcv_groups()
    43 rcd->groups = in hfi1_alloc_ctxt_rcv_groups()
    45 GFP_KERNEL, rcd->numa_id); in hfi1_alloc_ctxt_rcv_groups()
    46 if (!rcd->groups) in hfi1_alloc_ctxt_rcv_groups()
    48 tidbase = rcd->expected_base; in hfi1_alloc_ctxt_rcv_groups()
    50 grp = &rcd->groups[i]; in hfi1_alloc_ctxt_rcv_groups()
    73 kfree(rcd->groups); in hfi1_free_ctxt_rcv_groups()
    74 rcd->groups = NULL; in hfi1_free_ctxt_rcv_groups()
    75 hfi1_exp_tid_group_init(rcd); in hfi1_free_ctxt_rcv_groups()
    [all …]

driver.c
    373 packet->rcd = rcd; in init_packet()
    504 struct hfi1_ctxtdata *rcd = packet->rcd; in init_ps_mdata() local
    506 mdata->rcd = rcd; in init_ps_mdata()
    572 struct hfi1_ctxtdata *rcd = packet->rcd; in __prescan_rxq() local
    638 struct hfi1_ctxtdata *rcd = packet->rcd; in process_rcv_qp_work() local
    942 rcd->do_interrupt = rcd->slow_handler; in set_all_slowpath()
    1534 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_setup_bypass_packet() local
    1607 struct hfi1_ctxtdata *rcd = packet->rcd; in show_eflags_errs() local
    1625 struct hfi1_ctxtdata *rcd = packet->rcd; in handle_eflags() local
    1627 rcv_hdrerr(rcd, rcd->ppd, packet); in handle_eflags()
    [all …]

msix.c
    131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread, in msix_request_rcd_irq_common()
    132 rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT, in msix_request_rcd_irq_common()
    141 rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; in msix_request_rcd_irq_common()
    142 rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); in msix_request_rcd_irq_common()
    143 rcd->msix_intr = nr; in msix_request_rcd_irq_common()
    144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); in msix_request_rcd_irq_common()
    159 rcd->dd->unit, rcd->ctxt); in msix_request_rcd_irq()
    175 rcd->dd->unit, rcd->ctxt); in msix_netdev_request_rcd_irq()
    270 if (rcd) in msix_request_irqs()
    271 ret = msix_request_rcd_irq(rcd); in msix_request_irqs()
    [all …]

hfi.h
    1465 return rcd->head; in hfi1_rcd_head()
    1475 rcd->head = head; in hfi1_set_rcd_head()
    1481 return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; in get_rhf_addr()
    1511 return rcd->seq_cnt; in hfi1_seq_cnt()
    1522 rcd->seq_cnt = cnt; in hfi1_set_seq_cnt()
    1546 rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); in hfi1_seq_incr()
    1574 return rcd->do_interrupt == rcd->slow_handler; in hfi1_is_slowpath()
    1586 return rcd->do_interrupt == rcd->fast_handler; in hfi1_is_fastpath()
    1595 if (unlikely(!rcd)) in hfi1_set_fast()
    1598 rcd->do_interrupt = rcd->fast_handler; in hfi1_set_fast()
    [all …]

netdev_rx.c
    209 hfi1_rcd_get(rxq->rcd); in hfi1_netdev_rxq_init()
    213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
    232 if (rxq->rcd) { in hfi1_netdev_rxq_init()
    234 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_init()
    235 rxq->rcd = NULL; in hfi1_netdev_rxq_init()
    254 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_deinit()
    255 rxq->rcd = NULL; in hfi1_netdev_rxq_deinit()
    271 rxq->rcd->ctxt); in enable_queues()
    275 rxq->rcd); in enable_queues()
    289 rxq->rcd->ctxt); in disable_queues()
    [all …]

tid_rdma.c
    306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
    750 write_uctxt_csr(rcd->dd, rcd->ctxt, in kern_set_hw_flow()
    809 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_setup_hw_flow()
    837 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_clear_hw_flow()
    1466 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_setup() local
    1530 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_setup()
    1557 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_clear() local
    1574 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_clear()
    1670 req->rcd = qpriv->rcd; in hfi1_init_trdma_req()
    3468 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_tid_write_alloc_resources() local
    [all …]

intr.c
    202 void handle_user_interrupt(struct hfi1_ctxtdata *rcd) in handle_user_interrupt() argument
    204 struct hfi1_devdata *dd = rcd->dd; in handle_user_interrupt()
    208 if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) in handle_user_interrupt()
    211 if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) { in handle_user_interrupt()
    212 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()
    213 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd); in handle_user_interrupt()
    215 &rcd->event_flags)) { in handle_user_interrupt()
    216 rcd->urgent++; in handle_user_interrupt()
    217 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()

trace_rx.h
    27 TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
    36 TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
    38 __entry->ctxt = packet->rcd->ctxt;
    59 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
    60 TP_ARGS(dd, rcd),
    67 __entry->ctxt = rcd->ctxt;
    68 __entry->slow_path = hfi1_is_slowpath(rcd);
    69 __entry->dma_rtail = get_dma_rtail_setting(rcd);

aspm.h
    22 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd);
    26 static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd) in aspm_ctx_disable() argument
    29 if (likely(!rcd->aspm_intr_supported)) in aspm_ctx_disable()
    32 __aspm_ctx_disable(rcd); in aspm_ctx_disable()

exp_rcv.h
    151 hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp) in hfi1_tid_group_to_idx() argument
    153 return grp - &rcd->groups[0]; in hfi1_tid_group_to_idx()
    162 hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx) in hfi1_idx_to_tid_group() argument
    164 return &rcd->groups[idx]; in hfi1_idx_to_tid_group()
    167 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
    168 void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
    169 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);

rc.h
    25 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, in rc_defered_ack() argument
    31 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in rc_defered_ack()
    55 struct hfi1_ctxtdata *rcd);

fault.c
    50 struct hfi1_ctxtdata *rcd; in _fault_stats_seq_show() local
    53 rcd = hfi1_rcd_get_by_index(dd, j); in _fault_stats_seq_show()
    54 if (rcd) { in _fault_stats_seq_show()
    55 n_packets += rcd->opstats->stats[i].n_packets; in _fault_stats_seq_show()
    56 n_bytes += rcd->opstats->stats[i].n_bytes; in _fault_stats_seq_show()
    58 hfi1_rcd_put(rcd); in _fault_stats_seq_show()
    321 struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev; in hfi1_dbg_should_fault_rx()

msix.h
    16 int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
    22 int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
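
A pattern repeats across the hfi1 hits above (init.c, aspm.c, fault.c): a receive context is looked up by index with hfi1_rcd_get_by_index(), the returned pointer is NULL-checked, and the reference is dropped with hfi1_rcd_put() when the caller is done. The sketch below is a minimal, self-contained toy model of that get/put discipline, not the driver code: the struct, the fixed-size table, the plain integer refcount, and the absence of locking are all simplifying assumptions made only to show the shape of the pattern.

/* Toy model of the lookup/refcount pattern visible in the fault.c and
 * aspm.c hits. Names mirror the hfi1 helpers for recognizability only;
 * the real driver uses its own types, kref handling, and locking. */
#include <stdio.h>
#include <stdlib.h>

struct toy_ctxtdata {
	unsigned int ctxt;        /* context index                    */
	int refcount;             /* stands in for the driver's kref  */
	unsigned long n_packets;  /* stands in for opstats counters   */
};

#define NUM_CTXTS 8
static struct toy_ctxtdata *ctxts[NUM_CTXTS];

/* Look up a context by index and take a reference, or return NULL. */
static struct toy_ctxtdata *toy_rcd_get_by_index(unsigned int i)
{
	if (i >= NUM_CTXTS || !ctxts[i])
		return NULL;
	ctxts[i]->refcount++;
	return ctxts[i];
}

/* Drop a reference; free the context when the last one goes away. */
static void toy_rcd_put(struct toy_ctxtdata *rcd)
{
	if (rcd && --rcd->refcount == 0) {
		ctxts[rcd->ctxt] = NULL;
		free(rcd);
	}
}

int main(void)
{
	unsigned long total = 0;

	/* Populate a few slots, leaving gaps like a sparse dd->rcd[] array. */
	for (unsigned int i = 0; i < NUM_CTXTS; i += 2) {
		ctxts[i] = calloc(1, sizeof(*ctxts[i]));
		ctxts[i]->ctxt = i;
		ctxts[i]->refcount = 1;    /* creation reference */
		ctxts[i]->n_packets = 10 * i;
	}

	/* The aggregation loop from the fault.c hit, in miniature. */
	for (unsigned int i = 0; i < NUM_CTXTS; i++) {
		struct toy_ctxtdata *rcd = toy_rcd_get_by_index(i);

		if (rcd) {
			total += rcd->n_packets;
			toy_rcd_put(rcd);
		}
	}
	printf("total packets: %lu\n", total);

	/* Drop the creation references. */
	for (unsigned int i = 0; i < NUM_CTXTS; i++)
		if (ctxts[i])
			toy_rcd_put(ctxts[i]);
	return 0;
}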

/openbmc/linux/drivers/infiniband/hw/qib/

qib_file_ops.c
    1029 (rcd->piocnt % rcd->subctxt_cnt); in qib_mmapf()
    1090 if (rcd->urgent != rcd->urgent_poll) { in qib_poll_urgent()
    1092 rcd->urgent_poll = rcd->urgent; in qib_poll_urgent()
    1492 if (!rcd || !rcd->cnt) in find_shared_ctxt()
    1500 rcd->cnt >= rcd->subctxt_cnt) { in find_shared_ctxt()
    1564 struct qib_ctxtdata *rcd = fd->rcd; in do_qib_user_sdma_queue_create() local
    1669 rcd->pio_base = rcd->piocnt * uctxt; in qib_do_user_init()
    1672 rcd->pio_base = rcd->piocnt * uctxt + in qib_do_user_init()
    1692 rcd->ctxt, rcd->piocnt); in qib_do_user_init()
    1801 rcd = fd->rcd; in qib_close()
    [all …]

qib_init.c
    172 if (rcd) { in qib_create_ctxtdata()
    179 dd->rcd[ctxt] = rcd; in qib_create_ctxtdata()
    182 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), in qib_create_ctxtdata()
    208 rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + in qib_create_ctxtdata()
    487 struct qib_ctxtdata *rcd = dd->rcd[i]; in enable_chip() local
    489 if (rcd) in enable_chip()
    674 rcd = dd->rcd[i]; in qib_init()
    907 if (!rcd) in qib_free_ctxtdata()
    912 rcd->rcvhdrq, rcd->rcvhdrq_phys); in qib_free_ctxtdata()
    1587 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); in qib_create_rcvhdrq()
    [all …]

qib_tx.c
    86 last = rcd->pio_base + rcd->piocnt; in qib_disarm_piobufs_ifneeded()
    92 if (rcd->user_event_mask) { in qib_disarm_piobufs_ifneeded()
    131 struct qib_ctxtdata *rcd; in find_ctxt() local
    137 rcd = dd->rcd[ctxt]; in find_ctxt()
    138 if (!rcd || bufn < rcd->pio_base || in find_ctxt()
    139 bufn >= rcd->pio_base + rcd->piocnt) in find_ctxt()
    454 struct qib_ctxtdata *rcd; in qib_cancel_sends() local
    470 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
    471 if (rcd && rcd->ppd == ppd) { in qib_cancel_sends()
    472 last = rcd->pio_base + rcd->piocnt; in qib_cancel_sends()
    [all …]

qib_intr.c
    191 struct qib_ctxtdata *rcd; in qib_handle_urcv() local
    196 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) { in qib_handle_urcv()
    199 rcd = dd->rcd[i]; in qib_handle_urcv()
    200 if (!rcd || !rcd->cnt) in qib_handle_urcv()
    203 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) { in qib_handle_urcv()
    204 wake_up_interruptible(&rcd->wait); in qib_handle_urcv()
    205 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS, in qib_handle_urcv()
    206 rcd->ctxt); in qib_handle_urcv()
    208 &rcd->flag)) { in qib_handle_urcv()
    209 rcd->urgent++; in qib_handle_urcv()
    [all …]

qib_driver.c
    285 return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift); in qib_get_egrbuf()
    406 &rcd->qp_wait_list); in qib_rcv_hdrerr()
    456 l = rcd->head; in qib_kreceive()
    461 if (seq != rcd->seq_cnt) in qib_kreceive()
    509 crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, in qib_kreceive()
    529 if (++rcd->seq_cnt > 13) in qib_kreceive()
    530 rcd->seq_cnt = 1; in qib_kreceive()
    531 if (seq != rcd->seq_cnt) in qib_kreceive()
    548 rcd->head = l; in qib_kreceive()
    763 if (dd->rcd) in qib_reset_device()
    [all …]

qib_debugfs.c
    103 if (!dd->rcd[j]) in _opcode_stats_seq_show()
    105 n_packets += dd->rcd[j]->opstats->stats[i].n_packets; in _opcode_stats_seq_show()
    106 n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes; in _opcode_stats_seq_show()
    166 if (!dd->rcd[i]) in _ctx_stats_seq_show()
    169 for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++) in _ctx_stats_seq_show()
    170 n_packets += dd->rcd[i]->opstats->stats[j].n_packets; in _ctx_stats_seq_show()

/openbmc/linux/arch/x86/kernel/cpu/mce/

apei.c
    146 struct cper_mce_record rcd; in apei_write_mce() local
    148 memset(&rcd, 0, sizeof(rcd)); in apei_write_mce()
    152 rcd.hdr.section_count = 1; in apei_write_mce()
    155 rcd.hdr.validation_bits = 0; in apei_write_mce()
    156 rcd.hdr.record_length = sizeof(rcd); in apei_write_mce()
    162 rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; in apei_write_mce()
    163 rcd.sec_hdr.section_length = sizeof(rcd.mce); in apei_write_mce()
    166 rcd.sec_hdr.validation_bits = 0; in apei_write_mce()
    173 return erst_write(&rcd.hdr); in apei_write_mce()
    178 struct cper_mce_record rcd; in apei_read_mce() local
    [all …]

/openbmc/linux/drivers/acpi/apei/

erst.c
    1041 struct cper_pstore_record *rcd; in erst_reader() local
    1048 if (!rcd) { in erst_reader()
    1063 len = erst_read_record(record_id, &rcd->hdr, rcd_len, sizeof(*rcd), in erst_reader()
    1076 memcpy(record->buf, rcd->data, len - sizeof(*rcd)); in erst_reader()
    1097 kfree(rcd); in erst_reader()
    1107 memset(rcd, 0, sizeof(*rcd)); in erst_writer()
    1111 rcd->hdr.section_count = 1; in erst_writer()
    1116 rcd->hdr.record_length = sizeof(*rcd) + record->size; in erst_writer()
    1122 rcd->sec_hdr.section_offset = sizeof(*rcd); in erst_writer()
    1143 ret = erst_write(&rcd->hdr); in erst_writer()
    [all …]
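
The mce/apei.c and apei/erst.c hits build a CPER-style error record the same way: zero the buffer, set section_count and record_length from sizeof(), and point section_offset at the payload that follows the headers. The fragment below is a self-contained model of that offset arithmetic only; the toy structs are invented stand-ins and do not reproduce the real cper_mce_record / cper_pstore_record layouts, GUIDs, or validation bits.

/* Toy model of the record/section-header arithmetic visible in the apei.c
 * and erst.c hits: record_length covers the whole record, section_offset
 * locates the payload relative to the record start. Structs are invented. */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct toy_record_hdr {
	unsigned int section_count;
	unsigned int record_length;
};

struct toy_section_hdr {
	unsigned int section_offset;   /* from start of the record */
	unsigned int section_length;
};

struct toy_mce_payload {
	unsigned long long status;
	unsigned long long addr;
};

struct toy_record {
	struct toy_record_hdr hdr;
	struct toy_section_hdr sec_hdr;
	struct toy_mce_payload mce;
};

int main(void)
{
	struct toy_record rcd;

	memset(&rcd, 0, sizeof(rcd));
	rcd.hdr.section_count = 1;
	rcd.hdr.record_length = sizeof(rcd);

	/* Same idea as "(void *)&rcd.mce - (void *)&rcd" in the apei.c hit. */
	rcd.sec_hdr.section_offset = (unsigned int)offsetof(struct toy_record, mce);
	rcd.sec_hdr.section_length = sizeof(rcd.mce);

	printf("record_length=%u section_offset=%u section_length=%u\n",
	       rcd.hdr.record_length,
	       rcd.sec_hdr.section_offset,
	       rcd.sec_hdr.section_length);
	return 0;
}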

/openbmc/linux/drivers/net/vmxnet3/

vmxnet3_drv.c
    332 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) argument
    1387 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
    1400 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
    1425 if (!rcd->fcs) in vmxnet3_rx_error()
    1545 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && in vmxnet3_rq_rx_complete()
    1557 if (unlikely(rcd->eop && rcd->err)) { in vmxnet3_rq_rx_complete()
    1562 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
    1601 BUG_ON(!(rcd->sop && rcd->eop)); in vmxnet3_rq_rx_complete()
    1623 rcd->len, in vmxnet3_rq_rx_complete()
    1722 if (rcd->len) { in vmxnet3_rq_rx_complete()
    [all …]

/openbmc/linux/drivers/cpufreq/

sa1110-cpufreq.c
    130 static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) in set_mdcas() argument
    134 rcd = 2 * rcd - 1; in set_mdcas()
    135 shift = delayed + 1 + rcd; in set_mdcas()
    137 mdcas[0] = (1 << rcd) - 1; in set_mdcas()
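
The sa1110-cpufreq.c hit computes SDRAM MDCAS waveform words from an rcd parameter, which by its name appears to be the RAS-to-CAS delay in clock cycles (an assumption; the listing does not show the caller). The lines shown rescale rcd and build a low mask of that many one bits. Below is a standalone rendering of just that visible arithmetic; the remaining mdcas[] words are elided in the listing and are deliberately not reconstructed.

/* Standalone rendering of the arithmetic visible in the set_mdcas() hits.
 * Only the three lines shown in the listing are reproduced; the rest of the
 * function is elided there and omitted here. */
#include <stdio.h>

typedef unsigned int u_int;

static void set_mdcas_visible(u_int *mdcas, int delayed, u_int rcd)
{
	u_int shift;

	rcd = 2 * rcd - 1;          /* line 134 in the listing */
	shift = delayed + 1 + rcd;  /* line 135 */
	mdcas[0] = (1u << rcd) - 1; /* line 137: low 'rcd' bits set */

	(void)shift;                /* consumed by the elided lines */
}

int main(void)
{
	u_int mdcas[3] = { 0, 0, 0 };

	/* Example: rcd = 3 cycles, no extra delay. */
	set_mdcas_visible(mdcas, 0, 3);
	printf("mdcas[0] = 0x%08x\n", mdcas[0]);  /* 0x0000001f: 2*3-1 = 5 bits */
	return 0;
}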