Lines matching +full:mbox +full:- in drivers/net/ethernet/mellanox/mlxsw/pci.c (non-matching lines omitted)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
26 iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
28 ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
135 tasklet_schedule(&q->tasklet); in mlxsw_pci_queue_tasklet_schedule()
141 return q->mem_item.buf + (elem_size * elem_index); in __mlxsw_pci_queue_elem_get()
147 return &q->elem_info[elem_index]; in mlxsw_pci_queue_elem_info_get()
153 int index = q->producer_counter & (q->count - 1); in mlxsw_pci_queue_elem_info_producer_get()
155 if ((u16) (q->producer_counter - q->consumer_counter) == q->count) in mlxsw_pci_queue_elem_info_producer_get()
163 int index = q->consumer_counter & (q->count - 1); in mlxsw_pci_queue_elem_info_consumer_get()
170 return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem; in mlxsw_pci_queue_elem_get()
175 return owner_bit != !!(q->consumer_counter & q->count); in mlxsw_pci_elem_hw_owned()
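/*
 * Illustrative sketch (not part of this file): the ring arithmetic used by
 * the three helpers above. It assumes q->count is a power of two and that
 * the producer/consumer counters are free-running 16-bit values; the helper
 * names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline int ring_slot(uint16_t counter, uint16_t count)
{
	return counter & (count - 1);	/* low bits select the slot */
}

static inline bool ring_full(uint16_t producer, uint16_t consumer,
			     uint16_t count)
{
	return (uint16_t)(producer - consumer) == count; /* one lap apart */
}

static inline bool hw_owns_elem(bool owner_bit, uint16_t consumer,
				uint16_t count)
{
	/* the expected owner bit flips on every lap around the ring */
	return owner_bit != !!(consumer & count);
}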
182 return &mlxsw_pci->queues[q_type]; in mlxsw_pci_queue_type_group_get()
191 return queue_group->count; in __mlxsw_pci_queue_count()
208 return &mlxsw_pci->queues[q_type].q[q_num]; in __mlxsw_pci_queue_get()
242 DOORBELL(mlxsw_pci->doorbell_offset, in __mlxsw_pci_queue_doorbell_set()
243 mlxsw_pci_doorbell_type_offset[q->type], in __mlxsw_pci_queue_doorbell_set()
244 q->num), val); in __mlxsw_pci_queue_doorbell_set()
252 DOORBELL(mlxsw_pci->doorbell_offset, in __mlxsw_pci_queue_doorbell_arm_set()
253 mlxsw_pci_doorbell_arm_type_offset[q->type], in __mlxsw_pci_queue_doorbell_arm_set()
254 q->num), val); in __mlxsw_pci_queue_doorbell_arm_set()
261 __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter); in mlxsw_pci_queue_doorbell_producer_ring()
269 q->consumer_counter + q->count); in mlxsw_pci_queue_doorbell_consumer_ring()
277 __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter); in mlxsw_pci_queue_doorbell_arm_consumer_ring()
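/*
 * Note on the doorbells above: ringing the producer doorbell publishes
 * q->producer_counter so the device can fetch newly posted descriptors;
 * the consumer doorbell reports q->consumer_counter + q->count, i.e. the
 * software read position offset by a full lap; and the "arm" variant
 * re-enables the next interrupt for the queue.
 */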
283 return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index; in __mlxsw_pci_queue_page_get()
286 static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_sdq_init() argument
294 q->producer_counter = 0; in mlxsw_pci_sdq_init()
295 q->consumer_counter = 0; in mlxsw_pci_sdq_init()
296 tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC : in mlxsw_pci_sdq_init()
298 lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE : in mlxsw_pci_sdq_init()
302 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); in mlxsw_pci_sdq_init()
303 mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); in mlxsw_pci_sdq_init()
304 mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); in mlxsw_pci_sdq_init()
305 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ in mlxsw_pci_sdq_init()
309 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); in mlxsw_pci_sdq_init()
312 err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num); in mlxsw_pci_sdq_init()
322 mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); in mlxsw_pci_sdq_fini()
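/*
 * Summary of the SDQ setup above: SW2HW_DQ binds the send queue to its
 * completion queue, selects the local-processing mode and traffic class
 * (the EMAD queue gets dedicated values), declares the ring as 8 pages
 * (log2 size 3) and hands the page physical addresses to firmware;
 * HW2SW_SDQ returns the queue to software on teardown.
 */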
329 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_wqe_frag_map()
332 mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction); in mlxsw_pci_wqe_frag_map()
333 if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) { in mlxsw_pci_wqe_frag_map()
334 dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n"); in mlxsw_pci_wqe_frag_map()
335 return -EIO; in mlxsw_pci_wqe_frag_map()
345 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_wqe_frag_unmap()
351 dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction); in mlxsw_pci_wqe_frag_unmap()
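/*
 * The two helpers above wrap the usual streaming-DMA pattern for one WQE
 * fragment: dma_map_single() the buffer, bail out on dma_mapping_error(),
 * record the bus address and length in the WQE, and later
 * dma_unmap_single() with the same address/length/direction once the
 * descriptor completes.
 */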
358 char *wqe = elem_info->elem; in mlxsw_pci_rdq_skb_alloc()
364 return -ENOMEM; in mlxsw_pci_rdq_skb_alloc()
366 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, in mlxsw_pci_rdq_skb_alloc()
371 elem_info->u.rdq.skb = skb; in mlxsw_pci_rdq_skb_alloc()
385 skb = elem_info->u.rdq.skb; in mlxsw_pci_rdq_skb_free()
386 wqe = elem_info->elem; in mlxsw_pci_rdq_skb_free()
392 static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_rdq_init() argument
400 q->producer_counter = 0; in mlxsw_pci_rdq_init()
401 q->consumer_counter = 0; in mlxsw_pci_rdq_init()
406 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); in mlxsw_pci_rdq_init()
407 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ in mlxsw_pci_rdq_init()
411 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); in mlxsw_pci_rdq_init()
414 err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num); in mlxsw_pci_rdq_init()
420 for (i = 0; i < q->count; i++) { in mlxsw_pci_rdq_init()
427 q->producer_counter++; in mlxsw_pci_rdq_init()
434 for (i--; i >= 0; i--) { in mlxsw_pci_rdq_init()
438 mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); in mlxsw_pci_rdq_init()
449 mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); in mlxsw_pci_rdq_fini()
450 for (i = 0; i < q->count; i++) { in mlxsw_pci_rdq_fini()
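/*
 * RDQ setup above pre-fills the ring: after SW2HW_RDQ creates the queue
 * (bound to the CQs that follow the SDQ CQs, hence "sdq_count + q->num"),
 * every element gets an skb allocated and DMA-mapped up front and the
 * producer counter advances per element; the error path unwinds the skbs
 * already posted before HW2SW_RDQ destroys the queue, and fini frees
 * every remaining skb.
 */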
459 q->u.cq.v = mlxsw_pci->max_cqe_ver; in mlxsw_pci_cq_pre_init()
461 if (q->u.cq.v == MLXSW_PCI_CQE_V2 && in mlxsw_pci_cq_pre_init()
462 q->num < mlxsw_pci->num_sdq_cqs && in mlxsw_pci_cq_pre_init()
463 !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core)) in mlxsw_pci_cq_pre_init()
464 q->u.cq.v = MLXSW_PCI_CQE_V1; in mlxsw_pci_cq_pre_init()
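/*
 * CQ pre-init above picks the CQE format: start from the highest version
 * the device reports (max_cqe_ver), but drop the CQs that back SDQs to
 * CQE v1 when the core reports that its SDQs cannot use v2.
 */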
467 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_cq_init() argument
473 q->consumer_counter = 0; in mlxsw_pci_cq_init()
475 for (i = 0; i < q->count; i++) { in mlxsw_pci_cq_init()
478 mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); in mlxsw_pci_cq_init()
481 if (q->u.cq.v == MLXSW_PCI_CQE_V1) in mlxsw_pci_cq_init()
482 mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, in mlxsw_pci_cq_init()
484 else if (q->u.cq.v == MLXSW_PCI_CQE_V2) in mlxsw_pci_cq_init()
485 mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, in mlxsw_pci_cq_init()
488 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); in mlxsw_pci_cq_init()
489 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); in mlxsw_pci_cq_init()
490 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); in mlxsw_pci_cq_init()
494 mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); in mlxsw_pci_cq_init()
496 err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); in mlxsw_pci_cq_init()
507 mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); in mlxsw_pci_cq_fini()
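/*
 * CQ setup above marks every CQE as hardware-owned (owner = 1) before
 * SW2HW_CQ, advertises the CQE version in use, points the CQ at the
 * completion EQ (MLXSW_PCI_EQ_COMP_NUM) and passes the log2 ring size
 * together with the page physical addresses.
 */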
513 return ioread32be(mlxsw_pci->hw_addr + off); in mlxsw_pci_read32_off()
531 mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe); in mlxsw_pci_skb_cb_ts_set()
532 mlxsw_skb_cb(skb)->cqe_ts.nsec = in mlxsw_pci_skb_cb_ts_set()
542 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_cqe_sdq_handle()
549 spin_lock(&q->lock); in mlxsw_pci_cqe_sdq_handle()
551 tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info; in mlxsw_pci_cqe_sdq_handle()
552 skb = elem_info->u.sdq.skb; in mlxsw_pci_cqe_sdq_handle()
553 wqe = elem_info->elem; in mlxsw_pci_cqe_sdq_handle()
558 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in mlxsw_pci_cqe_sdq_handle()
560 mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb, in mlxsw_pci_cqe_sdq_handle()
567 elem_info->u.sdq.skb = NULL; in mlxsw_pci_cqe_sdq_handle()
569 if (q->consumer_counter++ != consumer_counter_limit) in mlxsw_pci_cqe_sdq_handle()
570 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n"); in mlxsw_pci_cqe_sdq_handle()
571 spin_unlock(&q->lock); in mlxsw_pci_cqe_sdq_handle()
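/*
 * The SDQ completion handler above runs under q->lock: it retrieves the
 * skb and tx_info stored at transmit time, unmaps the WQE fragments,
 * diverts the skb to the PTP code when a hardware timestamp was requested
 * (SKBTX_HW_TSTAMP), and sanity-checks that the consumer counter matches
 * the WQE counter reported by the CQE.
 */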
580 cb->rx_md_info.tx_port_is_lag = true; in mlxsw_pci_cqe_rdq_md_tx_port_init()
581 cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe); in mlxsw_pci_cqe_rdq_md_tx_port_init()
582 cb->rx_md_info.tx_lag_port_index = in mlxsw_pci_cqe_rdq_md_tx_port_init()
585 cb->rx_md_info.tx_port_is_lag = false; in mlxsw_pci_cqe_rdq_md_tx_port_init()
586 cb->rx_md_info.tx_sys_port = in mlxsw_pci_cqe_rdq_md_tx_port_init()
590 if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT && in mlxsw_pci_cqe_rdq_md_tx_port_init()
591 cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID) in mlxsw_pci_cqe_rdq_md_tx_port_init()
592 cb->rx_md_info.tx_port_valid = 1; in mlxsw_pci_cqe_rdq_md_tx_port_init()
594 cb->rx_md_info.tx_port_valid = 0; in mlxsw_pci_cqe_rdq_md_tx_port_init()
601 cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe); in mlxsw_pci_cqe_rdq_md_init()
602 if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID) in mlxsw_pci_cqe_rdq_md_init()
603 cb->rx_md_info.tx_congestion_valid = 1; in mlxsw_pci_cqe_rdq_md_init()
605 cb->rx_md_info.tx_congestion_valid = 0; in mlxsw_pci_cqe_rdq_md_init()
606 cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT; in mlxsw_pci_cqe_rdq_md_init()
608 cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe); in mlxsw_pci_cqe_rdq_md_init()
609 if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID) in mlxsw_pci_cqe_rdq_md_init()
610 cb->rx_md_info.latency_valid = 1; in mlxsw_pci_cqe_rdq_md_init()
612 cb->rx_md_info.latency_valid = 0; in mlxsw_pci_cqe_rdq_md_init()
614 cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe); in mlxsw_pci_cqe_rdq_md_init()
615 if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID) in mlxsw_pci_cqe_rdq_md_init()
616 cb->rx_md_info.tx_tc_valid = 1; in mlxsw_pci_cqe_rdq_md_init()
618 cb->rx_md_info.tx_tc_valid = 0; in mlxsw_pci_cqe_rdq_md_init()
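/*
 * The two helpers above decode CQE v2 mirroring metadata into the skb
 * control block: the TX port (either a LAG id plus port index or a system
 * port, with the multi-port and invalid encodings treated as "no valid
 * port"), plus congestion, latency and traffic class, each guarded by its
 * own *_valid flag.
 */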
628 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_cqe_rdq_handle()
637 skb = elem_info->u.rdq.skb; in mlxsw_pci_cqe_rdq_handle()
638 memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE); in mlxsw_pci_cqe_rdq_handle()
640 if (q->consumer_counter++ != consumer_counter_limit) in mlxsw_pci_cqe_rdq_handle()
641 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); in mlxsw_pci_cqe_rdq_handle()
645 dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); in mlxsw_pci_cqe_rdq_handle()
667 if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) in mlxsw_pci_cqe_rdq_handle()
669 mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index; in mlxsw_pci_cqe_rdq_handle()
672 mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { in mlxsw_pci_cqe_rdq_handle()
676 mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { in mlxsw_pci_cqe_rdq_handle()
684 byte_count -= ETH_FCS_LEN; in mlxsw_pci_cqe_rdq_handle()
686 mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); in mlxsw_pci_cqe_rdq_handle()
690 q->producer_counter++; in mlxsw_pci_cqe_rdq_handle()
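/*
 * The RDQ completion handler above detaches the filled skb, immediately
 * allocates and maps a replacement buffer in the same slot, fills in the
 * RX metadata (cookie index, mirror data and, where applicable,
 * timestamps on CQE v2 and later), trims the FCS from the reported byte
 * count and hands the skb to the core; the producer counter then advances
 * so the slot can be re-posted.
 */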
702 elem = elem_info->elem; in mlxsw_pci_cq_sw_cqe_get()
703 owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem); in mlxsw_pci_cq_sw_cqe_get()
706 q->consumer_counter++; in mlxsw_pci_cq_sw_cqe_get()
714 struct mlxsw_pci *mlxsw_pci = q->pci; in mlxsw_pci_cq_tasklet()
717 int credits = q->count >> 1; in mlxsw_pci_cq_tasklet()
721 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); in mlxsw_pci_cq_tasklet()
722 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); in mlxsw_pci_cq_tasklet()
725 memcpy(ncqe, cqe, q->elem_size); in mlxsw_pci_cq_tasklet()
733 wqe_counter, q->u.cq.v, ncqe); in mlxsw_pci_cq_tasklet()
734 q->u.cq.comp_sdq_count++; in mlxsw_pci_cq_tasklet()
740 wqe_counter, q->u.cq.v, ncqe); in mlxsw_pci_cq_tasklet()
741 q->u.cq.comp_rdq_count++; in mlxsw_pci_cq_tasklet()
752 return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : in mlxsw_pci_cq_elem_count()
758 return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE : in mlxsw_pci_cq_elem_size()
762 static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_eq_init() argument
768 q->consumer_counter = 0; in mlxsw_pci_eq_init()
770 for (i = 0; i < q->count; i++) { in mlxsw_pci_eq_init()
776 mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ in mlxsw_pci_eq_init()
777 mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ in mlxsw_pci_eq_init()
778 mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); in mlxsw_pci_eq_init()
782 mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); in mlxsw_pci_eq_init()
784 err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); in mlxsw_pci_eq_init()
795 mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); in mlxsw_pci_eq_fini()
800 mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); in mlxsw_pci_eq_cmd_event()
801 mlxsw_pci->cmd.comp.out_param = in mlxsw_pci_eq_cmd_event()
804 mlxsw_pci->cmd.wait_done = true; in mlxsw_pci_eq_cmd_event()
805 wake_up(&mlxsw_pci->cmd.wait); in mlxsw_pci_eq_cmd_event()
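/*
 * The command-EQ path above completes mlxsw_pci_cmd_exec(): the EQE
 * carries the command status and the 64-bit output parameter, which are
 * stashed in mlxsw_pci->cmd.comp before wait_done is set and the waiter
 * is woken.
 */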
815 elem = elem_info->elem; in mlxsw_pci_eq_sw_eqe_get()
819 q->consumer_counter++; in mlxsw_pci_eq_sw_eqe_get()
827 struct mlxsw_pci *mlxsw_pci = q->pci; in mlxsw_pci_eq_tasklet()
834 int credits = q->count >> 1; in mlxsw_pci_eq_tasklet()
844 switch (q->num) { in mlxsw_pci_eq_tasklet()
847 q->u.eq.ev_cmd_count++; in mlxsw_pci_eq_tasklet()
853 q->u.eq.ev_comp_count++; in mlxsw_pci_eq_tasklet()
856 q->u.eq.ev_other_count++; in mlxsw_pci_eq_tasklet()
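/*
 * The EQ tasklet above drains up to count/2 events per run and dispatches
 * on the queue number: the asynchronous/command EQ records command
 * completions, the completion EQ schedules the tasklets of the CQs
 * flagged in the events, and anything else is only counted.
 */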
879 int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
925 static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_queue_init() argument
929 struct mlxsw_pci_mem_item *mem_item = &q->mem_item; in mlxsw_pci_queue_init()
933 q->num = q_num; in mlxsw_pci_queue_init()
934 if (q_ops->pre_init) in mlxsw_pci_queue_init()
935 q_ops->pre_init(mlxsw_pci, q); in mlxsw_pci_queue_init()
937 spin_lock_init(&q->lock); in mlxsw_pci_queue_init()
938 q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) : in mlxsw_pci_queue_init()
939 q_ops->elem_count; in mlxsw_pci_queue_init()
940 q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) : in mlxsw_pci_queue_init()
941 q_ops->elem_size; in mlxsw_pci_queue_init()
942 q->type = q_ops->type; in mlxsw_pci_queue_init()
943 q->pci = mlxsw_pci; in mlxsw_pci_queue_init()
945 if (q_ops->tasklet) in mlxsw_pci_queue_init()
946 tasklet_setup(&q->tasklet, q_ops->tasklet); in mlxsw_pci_queue_init()
948 mem_item->size = MLXSW_PCI_AQ_SIZE; in mlxsw_pci_queue_init()
949 mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev, in mlxsw_pci_queue_init()
950 mem_item->size, &mem_item->mapaddr, in mlxsw_pci_queue_init()
952 if (!mem_item->buf) in mlxsw_pci_queue_init()
953 return -ENOMEM; in mlxsw_pci_queue_init()
955 q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL); in mlxsw_pci_queue_init()
956 if (!q->elem_info) { in mlxsw_pci_queue_init()
957 err = -ENOMEM; in mlxsw_pci_queue_init()
964 for (i = 0; i < q->count; i++) { in mlxsw_pci_queue_init()
968 elem_info->elem = in mlxsw_pci_queue_init()
969 __mlxsw_pci_queue_elem_get(q, q->elem_size, i); in mlxsw_pci_queue_init()
972 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_queue_init()
973 err = q_ops->init(mlxsw_pci, mbox, q); in mlxsw_pci_queue_init()
979 kfree(q->elem_info); in mlxsw_pci_queue_init()
981 dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size, in mlxsw_pci_queue_init()
982 mem_item->buf, mem_item->mapaddr); in mlxsw_pci_queue_init()
990 struct mlxsw_pci_mem_item *mem_item = &q->mem_item; in mlxsw_pci_queue_fini()
992 q_ops->fini(mlxsw_pci, q); in mlxsw_pci_queue_fini()
993 kfree(q->elem_info); in mlxsw_pci_queue_fini()
994 dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size, in mlxsw_pci_queue_fini()
995 mem_item->buf, mem_item->mapaddr); in mlxsw_pci_queue_fini()
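/*
 * mlxsw_pci_queue_init()/fini() above implement the lifecycle shared by
 * all queue types: allocate one DMA-coherent area of MLXSW_PCI_AQ_SIZE
 * for the ring, allocate the elem_info array whose entries point into it,
 * then call the per-type init with a zeroed mailbox; teardown and the
 * error paths mirror this in reverse.
 */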
998 static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_queue_group_init() argument
1006 queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); in mlxsw_pci_queue_group_init()
1007 queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL); in mlxsw_pci_queue_group_init()
1008 if (!queue_group->q) in mlxsw_pci_queue_group_init()
1009 return -ENOMEM; in mlxsw_pci_queue_group_init()
1012 err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops, in mlxsw_pci_queue_group_init()
1013 &queue_group->q[i], i); in mlxsw_pci_queue_group_init()
1017 queue_group->count = num_qs; in mlxsw_pci_queue_group_init()
1022 for (i--; i >= 0; i--) in mlxsw_pci_queue_group_init()
1023 mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); in mlxsw_pci_queue_group_init()
1024 kfree(queue_group->q); in mlxsw_pci_queue_group_init()
1034 queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); in mlxsw_pci_queue_group_fini()
1035 for (i = 0; i < queue_group->count; i++) in mlxsw_pci_queue_group_fini()
1036 mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); in mlxsw_pci_queue_group_fini()
1037 kfree(queue_group->q); in mlxsw_pci_queue_group_fini()
1040 static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) in mlxsw_pci_aqs_init() argument
1042 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_aqs_init()
1054 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_aqs_init()
1055 err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox); in mlxsw_pci_aqs_init()
1059 num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox); in mlxsw_pci_aqs_init()
1060 sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox); in mlxsw_pci_aqs_init()
1061 num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox); in mlxsw_pci_aqs_init()
1062 rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); in mlxsw_pci_aqs_init()
1063 num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); in mlxsw_pci_aqs_init()
1064 cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); in mlxsw_pci_aqs_init()
1065 cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox); in mlxsw_pci_aqs_init()
1066 num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); in mlxsw_pci_aqs_init()
1067 eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); in mlxsw_pci_aqs_init()
1072 dev_err(&pdev->dev, "Unsupported number of queues\n"); in mlxsw_pci_aqs_init()
1073 return -EINVAL; in mlxsw_pci_aqs_init()
1079 (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 && in mlxsw_pci_aqs_init()
1082 dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n"); in mlxsw_pci_aqs_init()
1083 return -EINVAL; in mlxsw_pci_aqs_init()
1086 mlxsw_pci->num_sdq_cqs = num_sdqs; in mlxsw_pci_aqs_init()
1088 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, in mlxsw_pci_aqs_init()
1091 dev_err(&pdev->dev, "Failed to initialize event queues\n"); in mlxsw_pci_aqs_init()
1095 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops, in mlxsw_pci_aqs_init()
1098 dev_err(&pdev->dev, "Failed to initialize completion queues\n"); in mlxsw_pci_aqs_init()
1102 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops, in mlxsw_pci_aqs_init()
1105 dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n"); in mlxsw_pci_aqs_init()
1109 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops, in mlxsw_pci_aqs_init()
1112 dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n"); in mlxsw_pci_aqs_init()
1117 mlxsw_pci->cmd.nopoll = true; in mlxsw_pci_aqs_init()
1131 mlxsw_pci->cmd.nopoll = false; in mlxsw_pci_aqs_fini()
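/*
 * mlxsw_pci_aqs_init() above first validates the firmware limits from
 * QUERY_AQ_CAP against the driver's compile-time ring sizes, then brings
 * the queue groups up in dependency order (EQs, CQs, SDQs, RDQs) and
 * finally sets cmd.nopoll so later commands complete via events rather
 * than polling; fini clears the flag and tears the groups down in
 * reverse.
 */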
1140 char *mbox, int index, in mlxsw_pci_config_profile_swid_config() argument
1145 if (swid->used_type) { in mlxsw_pci_config_profile_swid_config()
1147 mbox, index, swid->type); in mlxsw_pci_config_profile_swid_config()
1150 if (swid->used_properties) { in mlxsw_pci_config_profile_swid_config()
1152 mbox, index, swid->properties); in mlxsw_pci_config_profile_swid_config()
1155 mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); in mlxsw_pci_config_profile_swid_config()
1166 err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile, in mlxsw_pci_profile_get_kvd_sizes()
1179 static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_config_profile() argument
1186 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_config_profile()
1188 if (profile->used_max_vepa_channels) { in mlxsw_pci_config_profile()
1190 mbox, 1); in mlxsw_pci_config_profile()
1192 mbox, profile->max_vepa_channels); in mlxsw_pci_config_profile()
1194 if (profile->used_max_lag) { in mlxsw_pci_config_profile()
1195 mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1); in mlxsw_pci_config_profile()
1196 mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, in mlxsw_pci_config_profile()
1197 profile->max_lag); in mlxsw_pci_config_profile()
1199 if (profile->used_max_mid) { in mlxsw_pci_config_profile()
1201 mbox, 1); in mlxsw_pci_config_profile()
1203 mbox, profile->max_mid); in mlxsw_pci_config_profile()
1205 if (profile->used_max_pgt) { in mlxsw_pci_config_profile()
1207 mbox, 1); in mlxsw_pci_config_profile()
1209 mbox, profile->max_pgt); in mlxsw_pci_config_profile()
1211 if (profile->used_max_system_port) { in mlxsw_pci_config_profile()
1213 mbox, 1); in mlxsw_pci_config_profile()
1215 mbox, profile->max_system_port); in mlxsw_pci_config_profile()
1217 if (profile->used_max_vlan_groups) { in mlxsw_pci_config_profile()
1219 mbox, 1); in mlxsw_pci_config_profile()
1221 mbox, profile->max_vlan_groups); in mlxsw_pci_config_profile()
1223 if (profile->used_max_regions) { in mlxsw_pci_config_profile()
1225 mbox, 1); in mlxsw_pci_config_profile()
1227 mbox, profile->max_regions); in mlxsw_pci_config_profile()
1229 if (profile->used_flood_tables) { in mlxsw_pci_config_profile()
1231 mbox, 1); in mlxsw_pci_config_profile()
1233 mbox, profile->max_flood_tables); in mlxsw_pci_config_profile()
1235 mbox, profile->max_vid_flood_tables); in mlxsw_pci_config_profile()
1237 mbox, profile->max_fid_offset_flood_tables); in mlxsw_pci_config_profile()
1239 mbox, profile->fid_offset_flood_table_size); in mlxsw_pci_config_profile()
1241 mbox, profile->max_fid_flood_tables); in mlxsw_pci_config_profile()
1243 mbox, profile->fid_flood_table_size); in mlxsw_pci_config_profile()
1245 if (profile->used_flood_mode) { in mlxsw_pci_config_profile()
1247 mbox, 1); in mlxsw_pci_config_profile()
1249 mbox, profile->flood_mode); in mlxsw_pci_config_profile()
1251 if (profile->used_max_ib_mc) { in mlxsw_pci_config_profile()
1253 mbox, 1); in mlxsw_pci_config_profile()
1255 mbox, profile->max_ib_mc); in mlxsw_pci_config_profile()
1257 if (profile->used_max_pkey) { in mlxsw_pci_config_profile()
1259 mbox, 1); in mlxsw_pci_config_profile()
1261 mbox, profile->max_pkey); in mlxsw_pci_config_profile()
1263 if (profile->used_ar_sec) { in mlxsw_pci_config_profile()
1265 mbox, 1); in mlxsw_pci_config_profile()
1267 mbox, profile->ar_sec); in mlxsw_pci_config_profile()
1269 if (profile->used_adaptive_routing_group_cap) { in mlxsw_pci_config_profile()
1271 mbox, 1); in mlxsw_pci_config_profile()
1273 mbox, profile->adaptive_routing_group_cap); in mlxsw_pci_config_profile()
1275 if (profile->used_ubridge) { in mlxsw_pci_config_profile()
1276 mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1); in mlxsw_pci_config_profile()
1277 mlxsw_cmd_mbox_config_profile_ubridge_set(mbox, in mlxsw_pci_config_profile()
1278 profile->ubridge); in mlxsw_pci_config_profile()
1280 if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) { in mlxsw_pci_config_profile()
1285 mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1); in mlxsw_pci_config_profile()
1286 mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox, in mlxsw_pci_config_profile()
1288 mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox, in mlxsw_pci_config_profile()
1290 mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox, in mlxsw_pci_config_profile()
1293 mbox, 1); in mlxsw_pci_config_profile()
1294 mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox, in mlxsw_pci_config_profile()
1299 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, in mlxsw_pci_config_profile()
1300 &profile->swid_config[i]); in mlxsw_pci_config_profile()
1302 if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) { in mlxsw_pci_config_profile()
1303 mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1); in mlxsw_pci_config_profile()
1304 mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1); in mlxsw_pci_config_profile()
1307 if (profile->used_cqe_time_stamp_type) { in mlxsw_pci_config_profile()
1308 mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox, in mlxsw_pci_config_profile()
1310 mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox, in mlxsw_pci_config_profile()
1311 profile->cqe_time_stamp_type); in mlxsw_pci_config_profile()
1314 return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); in mlxsw_pci_config_profile()
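/*
 * CONFIG_PROFILE above follows a "set bit + value" convention: every
 * optional field is written together with a matching set_* flag, so
 * firmware only applies the fields the driver explicitly marked (VEPA
 * channels, LAG, MID, PGT, flood tables, KVD partition sizes, CQE
 * version, timestamp type and so on).
 */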
1317 static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox) in mlxsw_pci_boardinfo() argument
1319 struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info; in mlxsw_pci_boardinfo()
1322 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_boardinfo()
1323 err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox); in mlxsw_pci_boardinfo()
1326 mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd); in mlxsw_pci_boardinfo()
1327 mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid); in mlxsw_pci_boardinfo()
1331 static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, in mlxsw_pci_fw_area_init() argument
1339 mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item), in mlxsw_pci_fw_area_init()
1341 if (!mlxsw_pci->fw_area.items) in mlxsw_pci_fw_area_init()
1342 return -ENOMEM; in mlxsw_pci_fw_area_init()
1343 mlxsw_pci->fw_area.count = num_pages; in mlxsw_pci_fw_area_init()
1345 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_fw_area_init()
1347 mem_item = &mlxsw_pci->fw_area.items[i]; in mlxsw_pci_fw_area_init()
1349 mem_item->size = MLXSW_PCI_PAGE_SIZE; in mlxsw_pci_fw_area_init()
1350 mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev, in mlxsw_pci_fw_area_init()
1351 mem_item->size, in mlxsw_pci_fw_area_init()
1352 &mem_item->mapaddr, GFP_KERNEL); in mlxsw_pci_fw_area_init()
1353 if (!mem_item->buf) { in mlxsw_pci_fw_area_init()
1354 err = -ENOMEM; in mlxsw_pci_fw_area_init()
1357 mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); in mlxsw_pci_fw_area_init()
1358 mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ in mlxsw_pci_fw_area_init()
1360 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
1364 mlxsw_cmd_mbox_zero(mbox); in mlxsw_pci_fw_area_init()
1369 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
1378 for (i--; i >= 0; i--) { in mlxsw_pci_fw_area_init()
1379 mem_item = &mlxsw_pci->fw_area.items[i]; in mlxsw_pci_fw_area_init()
1381 dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size, in mlxsw_pci_fw_area_init()
1382 mem_item->buf, mem_item->mapaddr); in mlxsw_pci_fw_area_init()
1384 kfree(mlxsw_pci->fw_area.items); in mlxsw_pci_fw_area_init()
1393 mlxsw_cmd_unmap_fa(mlxsw_pci->core); in mlxsw_pci_fw_area_fini()
1395 for (i = 0; i < mlxsw_pci->fw_area.count; i++) { in mlxsw_pci_fw_area_fini()
1396 mem_item = &mlxsw_pci->fw_area.items[i]; in mlxsw_pci_fw_area_fini()
1398 dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size, in mlxsw_pci_fw_area_fini()
1399 mem_item->buf, mem_item->mapaddr); in mlxsw_pci_fw_area_fini()
1401 kfree(mlxsw_pci->fw_area.items); in mlxsw_pci_fw_area_fini()
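/*
 * The firmware-area code above satisfies the page count reported by
 * QUERY_FW: each page is a separate DMA-coherent allocation whose
 * physical address is batched into MAP_FA mailbox entries (log2size 0,
 * i.e. one page per entry), and UNMAP_FA releases the whole area before
 * the pages are freed.
 */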
1418 struct mlxsw_pci_mem_item *mbox) in mlxsw_pci_mbox_alloc() argument
1420 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_mbox_alloc()
1423 mbox->size = MLXSW_CMD_MBOX_SIZE; in mlxsw_pci_mbox_alloc()
1424 mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, in mlxsw_pci_mbox_alloc()
1425 &mbox->mapaddr, GFP_KERNEL); in mlxsw_pci_mbox_alloc()
1426 if (!mbox->buf) { in mlxsw_pci_mbox_alloc()
1427 dev_err(&pdev->dev, "Failed allocating memory for mailbox\n"); in mlxsw_pci_mbox_alloc()
1428 err = -ENOMEM; in mlxsw_pci_mbox_alloc()
1435 struct mlxsw_pci_mem_item *mbox) in mlxsw_pci_mbox_free() argument
1437 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_mbox_free()
1439 dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf, in mlxsw_pci_mbox_free()
1440 mbox->mapaddr); in mlxsw_pci_mbox_free()
1463 return -EBUSY; in mlxsw_pci_sys_ready_wait()
1469 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_sw_reset()
1476 dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n", in mlxsw_pci_sw_reset()
1482 err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); in mlxsw_pci_sw_reset()
1488 dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n", in mlxsw_pci_sw_reset()
1500 err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); in mlxsw_pci_alloc_irq_vectors()
1502 dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); in mlxsw_pci_alloc_irq_vectors()
1508 pci_free_irq_vectors(mlxsw_pci->pdev); in mlxsw_pci_free_irq_vectors()
1516 struct pci_dev *pdev = mlxsw_pci->pdev; in mlxsw_pci_init()
1517 char *mbox; in mlxsw_pci_init() local
1521 mlxsw_pci->core = mlxsw_core; in mlxsw_pci_init()
1523 mbox = mlxsw_cmd_mbox_alloc(); in mlxsw_pci_init()
1524 if (!mbox) in mlxsw_pci_init()
1525 return -ENOMEM; in mlxsw_pci_init()
1527 err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); in mlxsw_pci_init()
1533 dev_err(&pdev->dev, "MSI-X init failed\n"); in mlxsw_pci_init()
1537 err = mlxsw_cmd_query_fw(mlxsw_core, mbox); in mlxsw_pci_init()
1541 mlxsw_pci->bus_info.fw_rev.major = in mlxsw_pci_init()
1542 mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox); in mlxsw_pci_init()
1543 mlxsw_pci->bus_info.fw_rev.minor = in mlxsw_pci_init()
1544 mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox); in mlxsw_pci_init()
1545 mlxsw_pci->bus_info.fw_rev.subminor = in mlxsw_pci_init()
1546 mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox); in mlxsw_pci_init()
1548 if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) { in mlxsw_pci_init()
1549 dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n"); in mlxsw_pci_init()
1550 err = -EINVAL; in mlxsw_pci_init()
1553 if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) { in mlxsw_pci_init()
1554 dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n"); in mlxsw_pci_init()
1555 err = -EINVAL; in mlxsw_pci_init()
1559 mlxsw_pci->doorbell_offset = in mlxsw_pci_init()
1560 mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox); in mlxsw_pci_init()
1562 if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) { in mlxsw_pci_init()
1563 dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n"); in mlxsw_pci_init()
1564 err = -EINVAL; in mlxsw_pci_init()
1568 mlxsw_pci->free_running_clock_offset = in mlxsw_pci_init()
1569 mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox); in mlxsw_pci_init()
1571 if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) { in mlxsw_pci_init()
1572 dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n"); in mlxsw_pci_init()
1573 err = -EINVAL; in mlxsw_pci_init()
1577 mlxsw_pci->utc_sec_offset = in mlxsw_pci_init()
1578 mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox); in mlxsw_pci_init()
1580 if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) { in mlxsw_pci_init()
1581 dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n"); in mlxsw_pci_init()
1582 err = -EINVAL; in mlxsw_pci_init()
1586 mlxsw_pci->utc_nsec_offset = in mlxsw_pci_init()
1587 mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox); in mlxsw_pci_init()
1589 num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); in mlxsw_pci_init()
1590 err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); in mlxsw_pci_init()
1594 err = mlxsw_pci_boardinfo(mlxsw_pci, mbox); in mlxsw_pci_init()
1598 err = mlxsw_core_resources_query(mlxsw_core, mbox, res); in mlxsw_pci_init()
1604 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2; in mlxsw_pci_init()
1607 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1; in mlxsw_pci_init()
1611 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0; in mlxsw_pci_init()
1613 dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n"); in mlxsw_pci_init()
1617 err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); in mlxsw_pci_init()
1625 err = mlxsw_core_resources_query(mlxsw_core, mbox, res); in mlxsw_pci_init()
1629 err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); in mlxsw_pci_init()
1635 mlxsw_pci->bus_info.device_kind, mlxsw_pci); in mlxsw_pci_init()
1637 dev_err(&pdev->dev, "IRQ request failed\n"); in mlxsw_pci_init()
1663 mlxsw_cmd_mbox_free(mbox); in mlxsw_pci_init()
1671 free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); in mlxsw_pci_fini()
1681 u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1; in mlxsw_pci_sdq_pick()
1684 if (tx_info->is_emad) { in mlxsw_pci_sdq_pick()
1688 sdqn = 1 + (tx_info->local_port % ctl_sdq_count); in mlxsw_pci_sdq_pick()
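/*
 * Illustrative sketch (hypothetical names, not part of this file) of the
 * SDQ selection policy above: SDQ 0 is reserved for EMADs and data
 * packets are spread over the remaining queues by local port. It assumes
 * at least two SDQs exist, as the driver does.
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t pick_sdq(bool is_emad, uint16_t local_port, uint8_t sdq_count)
{
	uint8_t ctl_sdq_count = sdq_count - 1;	/* queues left for data */

	if (is_emad)
		return 0;			/* the EMAD/control SDQ */
	return 1 + local_port % ctl_sdq_count;
}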
1713 if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) { in mlxsw_pci_skb_transmit()
1720 spin_lock_bh(&q->lock); in mlxsw_pci_skb_transmit()
1724 err = -EAGAIN; in mlxsw_pci_skb_transmit()
1727 mlxsw_skb_cb(skb)->tx_info = *tx_info; in mlxsw_pci_skb_transmit()
1728 elem_info->u.sdq.skb = skb; in mlxsw_pci_skb_transmit()
1730 wqe = elem_info->elem; in mlxsw_pci_skb_transmit()
1735 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, in mlxsw_pci_skb_transmit()
1740 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mlxsw_pci_skb_transmit()
1741 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mlxsw_pci_skb_transmit()
1751 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in mlxsw_pci_skb_transmit()
1752 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in mlxsw_pci_skb_transmit()
1759 q->producer_counter++; in mlxsw_pci_skb_transmit()
1765 for (; i >= 0; i--) in mlxsw_pci_skb_transmit()
1768 spin_unlock_bh(&q->lock); in mlxsw_pci_skb_transmit()
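/*
 * mlxsw_pci_skb_transmit() above posts one WQE per skb under the queue
 * lock: skbs with more fragments than the WQE has scatter/gather entries
 * are linearized first, the head and each page fragment are DMA-mapped,
 * SKBTX_IN_PROGRESS is set when a hardware timestamp was requested, and
 * the producer counter advances before the doorbell rings; a full ring
 * returns -EAGAIN and the error path unmaps in reverse.
 */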
1780 bool evreq = mlxsw_pci->cmd.nopoll; in mlxsw_pci_cmd_exec()
1782 bool *p_wait_done = &mlxsw_pci->cmd.wait_done; in mlxsw_pci_cmd_exec()
1787 err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock); in mlxsw_pci_cmd_exec()
1792 memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); in mlxsw_pci_cmd_exec()
1793 in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr; in mlxsw_pci_cmd_exec()
1799 out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr; in mlxsw_pci_cmd_exec()
1830 wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout); in mlxsw_pci_cmd_exec()
1831 *p_status = mlxsw_pci->cmd.comp.status; in mlxsw_pci_cmd_exec()
1837 err = -EIO; in mlxsw_pci_cmd_exec()
1839 err = -ETIMEDOUT; in mlxsw_pci_cmd_exec()
1845 * copy registers into mbox buffer. in mlxsw_pci_cmd_exec()
1858 memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); in mlxsw_pci_cmd_exec()
1861 mutex_unlock(&mlxsw_pci->cmd.lock); in mlxsw_pci_cmd_exec()
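/*
 * mlxsw_pci_cmd_exec() above serializes commands under cmd.lock, copies
 * the input into the DMA-coherent in-mbox, and completes either by event
 * (cmd.nopoll, woken from the EQ handler above) or by polling; on success
 * the out-mbox is copied back to the caller, with a special case for
 * commands whose output is returned in registers rather than the mailbox.
 */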
1871 frc_offset_h = mlxsw_pci->free_running_clock_offset; in mlxsw_pci_read_frc_h()
1880 frc_offset_l = mlxsw_pci->free_running_clock_offset + 4; in mlxsw_pci_read_frc_l()
1888 return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset); in mlxsw_pci_read_utc_sec()
1895 return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset); in mlxsw_pci_read_utc_nsec()
1916 mutex_init(&mlxsw_pci->cmd.lock); in mlxsw_pci_cmd_init()
1917 init_waitqueue_head(&mlxsw_pci->cmd.wait); in mlxsw_pci_cmd_init()
1919 err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); in mlxsw_pci_cmd_init()
1923 err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); in mlxsw_pci_cmd_init()
1930 mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); in mlxsw_pci_cmd_init()
1932 mutex_destroy(&mlxsw_pci->cmd.lock); in mlxsw_pci_cmd_init()
1938 mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); in mlxsw_pci_cmd_fini()
1939 mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); in mlxsw_pci_cmd_fini()
1940 mutex_destroy(&mlxsw_pci->cmd.lock); in mlxsw_pci_cmd_fini()
1945 const char *driver_name = dev_driver_string(&pdev->dev); in mlxsw_pci_probe()
1951 return -ENOMEM; in mlxsw_pci_probe()
1955 dev_err(&pdev->dev, "pci_enable_device failed\n"); in mlxsw_pci_probe()
1961 dev_err(&pdev->dev, "pci_request_regions failed\n"); in mlxsw_pci_probe()
1965 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in mlxsw_pci_probe()
1967 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); in mlxsw_pci_probe()
1969 dev_err(&pdev->dev, "dma_set_mask failed\n"); in mlxsw_pci_probe()
1975 dev_err(&pdev->dev, "invalid PCI region size\n"); in mlxsw_pci_probe()
1976 err = -EINVAL; in mlxsw_pci_probe()
1980 mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0), in mlxsw_pci_probe()
1982 if (!mlxsw_pci->hw_addr) { in mlxsw_pci_probe()
1983 dev_err(&pdev->dev, "ioremap failed\n"); in mlxsw_pci_probe()
1984 err = -EIO; in mlxsw_pci_probe()
1989 mlxsw_pci->pdev = pdev; in mlxsw_pci_probe()
1996 mlxsw_pci->bus_info.device_kind = driver_name; in mlxsw_pci_probe()
1997 mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); in mlxsw_pci_probe()
1998 mlxsw_pci->bus_info.dev = &pdev->dev; in mlxsw_pci_probe()
1999 mlxsw_pci->bus_info.read_clock_capable = true; in mlxsw_pci_probe()
2000 mlxsw_pci->id = id; in mlxsw_pci_probe()
2002 err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, in mlxsw_pci_probe()
2006 dev_err(&pdev->dev, "cannot register bus device\n"); in mlxsw_pci_probe()
2015 iounmap(mlxsw_pci->hw_addr); in mlxsw_pci_probe()
2031 mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); in mlxsw_pci_remove()
2033 iounmap(mlxsw_pci->hw_addr); in mlxsw_pci_remove()
2034 pci_release_regions(mlxsw_pci->pdev); in mlxsw_pci_remove()
2035 pci_disable_device(mlxsw_pci->pdev); in mlxsw_pci_remove()
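/*
 * Probe/remove above are conventional PCI boilerplate: enable the device,
 * claim its regions, prefer a 64-bit DMA mask with a 32-bit fallback,
 * ioremap BAR 0, fill in the bus_info and register the bus device with
 * the mlxsw core; remove unwinds in reverse order.
 */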
2041 pci_driver->probe = mlxsw_pci_probe; in mlxsw_pci_driver_register()
2042 pci_driver->remove = mlxsw_pci_remove; in mlxsw_pci_driver_register()
2043 pci_driver->shutdown = mlxsw_pci_remove; in mlxsw_pci_driver_register()