Lines matching refs:pr — cross-references to the identifier pr (struct ehea_port_res *) in the ehea driver's ehea_main.c. Each entry gives the source line number, the matching code, and the enclosing function; "local"/"argument" marks how pr is bound there.

194 struct ehea_port_res *pr = &port->port_res[l];  in ehea_update_firmware_handles()  local
197 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
199 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
201 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
203 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
205 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
207 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
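
The hits at 194-207 show only the stores; the loop over the port resources and the arr/i bookkeeping fall outside the matched lines. A minimal standalone model of the pattern, with illustrative stand-in types rather than the driver's:

/* Every port resource contributes six firmware handles to one flat array,
 * mirroring the stores at lines 197-207. Types and bounds are invented. */
#include <stdint.h>
#include <stdio.h>

struct res { uint64_t qp, send_cq, recv_cq, eq, send_mr, recv_mr; };

int main(void)
{
    struct res port_res[2] = {
        { 1, 2, 3, 4, 5, 6 },
        { 7, 8, 9, 10, 11, 12 },
    };
    uint64_t arr[12];
    int i = 0;

    for (int l = 0; l < 2; l++) {
        struct res *pr = &port_res[l];

        arr[i++] = pr->qp;
        arr[i++] = pr->send_cq;
        arr[i++] = pr->recv_cq;
        arr[i++] = pr->eq;
        arr[i++] = pr->send_mr;
        arr[i++] = pr->recv_mr;
    }

    for (int k = 0; k < i; k++)
        printf("fwh[%d] = %llu\n", k, (unsigned long long)arr[k]);
    return 0;
}
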
383 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) in ehea_refill_rq1() argument
385 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
386 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
387 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
388 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
392 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
396 pr->rq1_skba.index = index; in ehea_refill_rq1()
397 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
406 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
419 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
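
Lines 387-406 outline the RQ1 refill bookkeeping: the ring length is a power of two, so wrap-around is a mask rather than a modulo, and os_skbs carries over any shortfall when skb allocation fails mid-refill. Note fill_wqes = os_skbs + nr_of_wqes at 388, so a later call with nr_of_wqes == 0 (as in ehea_restart_qps() at 2672) still posts the backlog. A runnable model of the index arithmetic, with names borrowed for readability:

/* Power-of-two ring arithmetic as in ehea_refill_rq1():
 * "index & (len - 1)" wraps without a division. */
#include <stdio.h>

int main(void)
{
    unsigned int len = 16;                 /* power of two, like rq1_skba.len */
    unsigned int max_index_mask = len - 1; /* cf. line 387 */
    unsigned int index = 14;

    for (int i = 0; i < 5; i++) {
        printf("slot %u\n", index);        /* prints 14, 15, 0, 1, 2 */
        index = (index + 1) & max_index_mask;
    }
    return 0;
}
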
422 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) in ehea_init_fill_rq1() argument
424 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
425 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
428 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
439 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
442 static int ehea_refill_rq_def(struct ehea_port_res *pr, in ehea_refill_rq_def() argument
446 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
447 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
472 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
492 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
509 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
511 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
517 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq2() argument
519 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
525 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq3() argument
527 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
545 struct ehea_port_res *pr) in ehea_fill_skb() argument
560 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
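
Line 560 derives the RX queue number from pointer arithmetic alone: pr - &pr->port->port_res[0] is pr's index within the port_res array. A tiny standalone check of the idiom:

#include <stdio.h>

struct res { int dummy; };

int main(void)
{
    struct res port_res[4];
    struct res *pr = &port_res[2];

    /* Element pointer minus array base = element index (a ptrdiff_t). */
    printf("queue index = %td\n", pr - &port_res[0]);  /* prints 2 */
    return 0;
}
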
617 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, in ehea_treat_poll_error() argument
624 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
626 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
628 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
632 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
636 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
641 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
643 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
646 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
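
Lines 624-641 classify one completion's error cause into per-cause counters, reclaim the posted skb by its WQE index (632/636), and escalate to a full port reset on fatal errors (646). A small standalone model of the counter classification; the status bits here are invented for illustration, not ehea's encoding:

#include <stdio.h>

#define ERR_TCP_CKSUM  0x1   /* illustrative bit values */
#define ERR_IP_CKSUM   0x2
#define ERR_FRAME_CRC  0x4

struct p_stats { long err_tcp_cksum, err_ip_cksum, err_frame_crc; };

static void count_error(struct p_stats *s, unsigned int status)
{
    if (status & ERR_TCP_CKSUM)
        s->err_tcp_cksum++;
    if (status & ERR_IP_CKSUM)
        s->err_ip_cksum++;
    if (status & ERR_FRAME_CRC)
        s->err_frame_crc++;
}

int main(void)
{
    struct p_stats s = { 0 };

    count_error(&s, ERR_IP_CKSUM | ERR_FRAME_CRC);
    printf("tcp=%ld ip=%ld crc=%ld\n",
           s.err_tcp_cksum, s.err_ip_cksum, s.err_frame_crc);
    return 0;
}
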
654 struct ehea_port_res *pr, in ehea_proc_rwqes() argument
657 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
658 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
661 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
662 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
663 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
664 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
665 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
666 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
701 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
711 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
722 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
732 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
734 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
735 port_reset = ehea_treat_poll_error(pr, rq, cqe, in ehea_proc_rwqes()
744 pr->rx_packets += processed; in ehea_proc_rwqes()
745 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
747 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); in ehea_proc_rwqes()
748 ehea_refill_rq2(pr, processed_rq2); in ehea_proc_rwqes()
749 ehea_refill_rq3(pr, processed_rq3); in ehea_proc_rwqes()
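
The tail of ehea_proc_rwqes() (744-749) shows the refill contract: whatever each of the three receive queues consumed during the poll is put back in the same amounts. A standalone model of the per-queue accounting:

#include <stdio.h>

int main(void)
{
    int fill[4] = { 0 };                  /* fill[1..3]: wqes consumed per RQ */
    int cqe_rq[] = { 1, 1, 2, 3, 1, 2 };  /* rq number decoded from each cqe */

    for (unsigned int i = 0; i < sizeof(cqe_rq) / sizeof(*cqe_rq); i++)
        fill[cqe_rq[i]]++;

    for (int rq = 1; rq <= 3; rq++)
        printf("refill rq%d with %d wqes\n", rq, fill[rq]);
    return 0;
}
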
761 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag() local
762 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
774 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs() local
776 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
778 atomic_dec(&pr->swqe_avail); in check_sqs()
786 ehea_post_swqe(pr->qp, swqe); in check_sqs()
789 pr->sq_restart_flag == 0, in check_sqs()
794 ehea_schedule_port_reset(pr->port); in check_sqs()
801 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) in ehea_proc_cqes() argument
804 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
810 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
811 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
821 pr->sq_restart_flag = 1; in ehea_proc_cqes()
830 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
835 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
840 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
847 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
849 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
859 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
862 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
865 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
870 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
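
Lines 847-870 are the TX side of a credit scheme: reaped completions return swqe slots in one atomic_add (859), and a stopped subqueue is only woken once swqe_avail climbs past swqe_refill_th, with the condition checked again around the wake to narrow the race (862-865). A runnable single-threaded model of the threshold logic:

/* Compile with -std=c11. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int swqe_avail;
static int swqe_refill_th = 4;
static bool queue_stopped;

static void tx_complete(int reaped)
{
    atomic_fetch_add(&swqe_avail, reaped);
    if (queue_stopped && atomic_load(&swqe_avail) >= swqe_refill_th) {
        queue_stopped = false;  /* the driver calls the netdev wake helper here */
        printf("queue woken, %d credits\n", atomic_load(&swqe_avail));
    }
}

int main(void)
{
    atomic_store(&swqe_avail, 0);
    queue_stopped = true;
    tx_complete(2);   /* below threshold: stays stopped */
    tx_complete(3);   /* crosses threshold: wakes */
    return 0;
}
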
879 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, in ehea_poll() local
881 struct net_device *dev = pr->port->netdev; in ehea_poll()
887 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
888 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
892 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
893 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
894 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
895 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
897 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
898 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
906 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
907 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
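
Read together, lines 879-907 trace the classic NAPI poll/rearm/recheck loop: container_of() recovers the per-queue context from the napi struct (879), completions and receives are processed up to budget (887-888), and after completing the poll the CQs are rearmed (892-896) and peeked once more (897-898), so work that raced the rearm reschedules the poll instead of being lost. A hedged reconstruction of the control flow connecting those lines, reusing the helpers named in the matches (not standalone; exact signatures and error handling elided):

static int poll_sketch(struct napi_struct *napi, int budget)
{
    struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                            napi);
    int rx = 0, wqe_index;

    ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);  /* reap TX completions */
    rx += ehea_proc_rwqes(pr->port->netdev, pr, budget - rx);

    while (rx != budget) {
        napi_complete(napi);
        /* rearm both CQs, then peek for work that raced the rearm */
        ehea_reset_cq_ep(pr->recv_cq);
        ehea_reset_cq_ep(pr->send_cq);
        ehea_reset_cq_n1(pr->recv_cq);
        ehea_reset_cq_n1(pr->send_cq);
        if (!ehea_poll_rq1(pr->qp, &wqe_index) &&
            !ehea_poll_cq(pr->send_cq))
            return rx;               /* truly idle: stay rearmed */
        if (!napi_reschedule(napi))
            return rx;               /* poll already owned elsewhere */
        ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(pr->port->netdev, pr, budget - rx);
    }
    return rx;
}

The companion at 915-917 is the whole RX interrupt handler: it does nothing but napi_schedule(&pr->napi) and lets the poll loop above do the work.
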
915 struct ehea_port_res *pr = param; in ehea_recv_irq_handler() local
917 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
1251 static int ehea_fill_port_res(struct ehea_port_res *pr) in ehea_fill_port_res() argument
1254 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1256 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1258 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1260 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1268 struct ehea_port_res *pr; in ehea_reg_interrupts() local
1290 pr = &port->port_res[i]; in ehea_reg_interrupts()
1291 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1293 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1295 0, pr->int_send_name, pr); in ehea_reg_interrupts()
1298 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1303 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1326 struct ehea_port_res *pr; in ehea_free_interrupts() local
1332 pr = &port->port_res[i]; in ehea_free_interrupts()
1333 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1336 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1394 static int ehea_gen_smrs(struct ehea_port_res *pr) in ehea_gen_smrs() argument
1397 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1399 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1403 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1410 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1416 static int ehea_rem_smrs(struct ehea_port_res *pr) in ehea_rem_smrs() argument
1418 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1419 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
1440 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, in ehea_init_port_res() argument
1449 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1450 tx_packets = pr->tx_packets; in ehea_init_port_res()
1451 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1452 rx_packets = pr->rx_packets; in ehea_init_port_res()
1454 memset(pr, 0, sizeof(struct ehea_port_res)); in ehea_init_port_res()
1456 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1457 pr->tx_packets = tx_packets; in ehea_init_port_res()
1458 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1459 pr->rx_packets = rx_packets; in ehea_init_port_res()
1461 pr->port = port; in ehea_init_port_res()
1463 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1464 if (!pr->eq) { in ehea_init_port_res()
1469 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1470 pr->eq->fw_handle, in ehea_init_port_res()
1472 if (!pr->recv_cq) { in ehea_init_port_res()
1477 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1478 pr->eq->fw_handle, in ehea_init_port_res()
1480 if (!pr->send_cq) { in ehea_init_port_res()
1487 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1488 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1512 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1513 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1516 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1517 if (!pr->qp) { in ehea_init_port_res()
1531 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1533 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1534 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1535 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1536 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1540 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1541 if (ehea_gen_smrs(pr) != 0) { in ehea_init_port_res()
1546 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1550 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll); in ehea_init_port_res()
1557 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1558 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1559 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1560 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1561 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1562 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1563 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1564 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
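
ehea_init_port_res() (1440-1564) acquires eq, recv_cq, send_cq, qp, four skb arrays, and the memory regions in order, and the error path at 1557-1564 tears that stack down in reverse. The shape, as a runnable toy with three resources:

/* Goto-unwind ladder: each acquisition gains a cleanup label, and a
 * failure jumps to the label that releases everything acquired so far. */
#include <stdlib.h>

static void *acquire(void) { return malloc(16); }
static void release(void *p) { free(p); }

static int init_res(void **eq, void **cq, void **qp)
{
    *eq = acquire();
    if (!*eq)
        goto out;
    *cq = acquire();
    if (!*cq)
        goto out_free_eq;
    *qp = acquire();
    if (!*qp)
        goto out_free_cq;
    return 0;

out_free_cq:
    release(*cq);
out_free_eq:
    release(*eq);
out:
    return -1;
}

int main(void)
{
    void *eq, *cq, *qp;

    if (init_res(&eq, &cq, &qp))
        return 1;
    release(qp);
    release(cq);
    release(eq);
    return 0;
}
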
1569 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) in ehea_clean_portres() argument
1573 if (pr->qp) in ehea_clean_portres()
1574 netif_napi_del(&pr->napi); in ehea_clean_portres()
1576 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1579 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1580 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1581 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1583 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1584 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1586 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1587 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1589 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1590 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1592 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1593 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1595 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1596 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1597 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1598 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1599 ret = ehea_rem_smrs(pr); in ehea_clean_portres()
2021 struct ehea_port_res *pr; in ehea_start_xmit() local
2024 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2027 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2029 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2036 pr->tx_packets++; in ehea_start_xmit()
2037 pr->tx_bytes += skb->len; in ehea_start_xmit()
2041 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2045 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2049 pr->swqe_ll_count = 0; in ehea_start_xmit()
2051 pr->swqe_ll_count += 1; in ehea_start_xmit()
2055 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2057 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2058 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2060 pr->sq_skba.index++; in ehea_start_xmit()
2061 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2063 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2067 pr->swqe_id_counter += 1; in ehea_start_xmit()
2070 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2079 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2081 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2082 pr->p_stats.queue_stopped++; in ehea_start_xmit()
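
In ehea_start_xmit() (2053-2061), the swqe type, a running counter, and the sq_skba ring index are packed into the 64-bit wr_id with EHEA_BMASK_SET, and the index advances under the same power-of-two mask seen on the receive side (2061). That packing is what lets ehea_proc_cqes() (847-849) decode the completion and release the right skb slot. A standalone model of the round trip; the field layout here is invented for illustration and differs from the real EHEA_BMASK_* definitions:

#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT  60                 /* illustrative layout */
#define COUNT_SHIFT 16
#define INDEX_MASK  0xffffULL

static uint64_t pack_wr_id(unsigned int type, uint32_t count, uint16_t index)
{
    return ((uint64_t)type << TYPE_SHIFT) |
           ((uint64_t)count << COUNT_SHIFT) |
           (index & INDEX_MASK);
}

int main(void)
{
    uint64_t id = pack_wr_id(3, 4242, 17);

    /* The completion side unpacks the same fields to find the skb slot. */
    printf("type=%llu count=%llu index=%llu\n",
           (unsigned long long)(id >> TYPE_SHIFT),
           (unsigned long long)((id >> COUNT_SHIFT) & 0xffffffffULL),
           (unsigned long long)(id & INDEX_MASK));
    return 0;
}
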
2494 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq() local
2495 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2499 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2528 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps() local
2529 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2564 dret = ehea_rem_smrs(pr); in ehea_stop_qps()
2578 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) in ehea_update_rqs() argument
2583 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2584 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2586 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2628 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps() local
2629 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2631 ret = ehea_gen_smrs(pr); in ehea_restart_qps()
2637 ehea_update_rqs(qp, pr); in ehea_restart_qps()
2672 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2673 ehea_refill_rq2(pr, 0); in ehea_restart_qps()
2674 ehea_refill_rq3(pr, 0); in ehea_restart_qps()