
Searched full:tx_desc (Results 1 – 25 of 153) sorted by relevance


/openbmc/u-boot/drivers/net/pfe_eth/
pfe_driver.c
126 struct tx_desc_s *tx_desc = g_tx_desc; in pfe_send() local
132 data, length, tx_desc->tx_base, tx_desc->tx_to_send); in pfe_send()
134 bd = tx_desc->tx_base + tx_desc->tx_to_send; in pfe_send()
181 return tx_desc->tx_to_send; in pfe_send()
195 struct tx_desc_s *tx_desc = g_tx_desc; in pfe_tx_done() local
198 debug("%s:tx_base: %p, tx_to_send: %d\n", __func__, tx_desc->tx_base, in pfe_tx_done()
199 tx_desc->tx_to_send); in pfe_tx_done()
201 bd = tx_desc->tx_base + tx_desc->tx_to_send; in pfe_tx_done()
215 tx_desc->tx_to_send = (tx_desc->tx_to_send + 1) in pfe_tx_done()
216 & (tx_desc->tx_ring_size - 1); in pfe_tx_done()
[all …]
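
The pfe_eth excerpt above advances its ring index with a power-of-two mask, tx_to_send = (tx_to_send + 1) & (tx_ring_size - 1). Below is a minimal, self-contained C sketch of that wrap pattern; the names are illustrative, not the actual pfe_eth structures.

```c
/* Minimal model of the power-of-two ring-index wrap used in pfe_tx_done().
 * All names here are invented for illustration. */
#include <stdio.h>

#define RING_SIZE 8u  /* must be a power of two for the mask trick to work */

struct demo_ring {
	unsigned int to_send;  /* index of the next descriptor to hand out */
};

static void ring_advance(struct demo_ring *r)
{
	/* (i + 1) & (size - 1) equals (i + 1) % size when size is a
	 * power of two, but avoids an integer division. */
	r->to_send = (r->to_send + 1) & (RING_SIZE - 1);
}

int main(void)
{
	struct demo_ring r = { .to_send = 6 };

	for (int i = 0; i < 4; i++) {
		printf("using descriptor %u\n", r.to_send);
		ring_advance(&r);  /* prints 6, 7, 0, 1: wraps at RING_SIZE */
	}
	return 0;
}
```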
/openbmc/linux/drivers/infiniband/ulp/isert/
ib_isert.c
783 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) in isert_login_post_send() argument
789 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, in isert_login_post_send()
792 tx_desc->tx_cqe.done = isert_login_send_done; in isert_login_post_send()
795 send_wr.wr_cqe = &tx_desc->tx_cqe; in isert_login_post_send()
796 send_wr.sg_list = tx_desc->tx_sg; in isert_login_post_send()
797 send_wr.num_sge = tx_desc->num_sge; in isert_login_post_send()
810 struct iser_tx_desc *tx_desc) in __isert_create_send_desc() argument
813 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); in __isert_create_send_desc()
814 tx_desc->iser_header.flags = ISCSI_CTRL; in __isert_create_send_desc()
816 tx_desc->num_sge = 1; in __isert_create_send_desc()
[all …]
/openbmc/linux/drivers/crypto/ccp/
ccp-dmaengine.c
111 if (!async_tx_test_ack(&desc->tx_desc)) in ccp_cleanup_desc_resources()
148 desc->tx_desc.cookie, cmd); in ccp_issue_next_cmd()
155 ret, desc->tx_desc.cookie, cmd); in ccp_issue_next_cmd()
170 __func__, desc->tx_desc.cookie, cmd); in ccp_free_active_cmd()
193 struct dma_async_tx_descriptor *tx_desc; in ccp_handle_active_desc() local
212 tx_desc = &desc->tx_desc; in ccp_handle_active_desc()
214 tx_desc = NULL; in ccp_handle_active_desc()
225 desc->tx_desc.cookie, desc->status); in ccp_handle_active_desc()
227 dma_cookie_complete(tx_desc); in ccp_handle_active_desc()
228 dma_descriptor_unmap(tx_desc); in ccp_handle_active_desc()
[all …]
/openbmc/u-boot/drivers/net/
pch_gbe.c
137 struct pch_gbe_tx_desc *tx_desc = &priv->tx_desc[0]; in pch_gbe_tx_descs_init() local
139 memset(tx_desc, 0, sizeof(struct pch_gbe_tx_desc) * PCH_GBE_DESC_NUM); in pch_gbe_tx_descs_init()
141 flush_dcache_range((ulong)tx_desc, (ulong)&tx_desc[PCH_GBE_DESC_NUM]); in pch_gbe_tx_descs_init()
143 writel(dm_pci_virt_to_mem(priv->dev, tx_desc), in pch_gbe_tx_descs_init()
147 writel(dm_pci_virt_to_mem(priv->dev, tx_desc + 1), in pch_gbe_tx_descs_init()
246 struct pch_gbe_tx_desc *tx_head, *tx_desc; in pch_gbe_send() local
253 tx_head = &priv->tx_desc[0]; in pch_gbe_send()
254 tx_desc = &priv->tx_desc[priv->tx_idx]; in pch_gbe_send()
259 tx_desc->buffer_addr = dm_pci_virt_to_mem(priv->dev, packet); in pch_gbe_send()
260 tx_desc->length = length; in pch_gbe_send()
[all …]
altera_tse.c
158 struct alt_sgdma_descriptor *tx_desc = priv->tx_desc; in altera_tse_send_sgdma() local
161 tx_desc, in altera_tse_send_sgdma()
162 tx_desc + 1, in altera_tse_send_sgdma()
172 alt_sgdma_start_transfer(priv->sgdma_tx, tx_desc); in altera_tse_send_sgdma()
174 debug("sent %d bytes\n", tx_desc->actual_bytes_transferred); in altera_tse_send_sgdma()
176 return tx_desc->actual_bytes_transferred; in altera_tse_send_sgdma()
314 struct msgdma_extended_desc *desc = priv->tx_desc; in altera_tse_send_msgdma()
623 else if (strcmp(list, "tx_desc") == 0) in altera_tse_probe()
624 priv->tx_desc = base; in altera_tse_probe()
650 priv->tx_desc = desc_mem; in altera_tse_probe()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
241 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_stamp_wqe() local
244 __be32 *ptr = (__be32 *)tx_desc; in mlx4_en_stamp_wqe()
248 if (likely((void *)tx_desc + in mlx4_en_stamp_wqe()
281 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_free_tx_desc() local
282 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; in mlx4_en_free_tx_desc()
312 if (likely((void *)tx_desc + in mlx4_en_free_tx_desc()
405 struct mlx4_en_tx_desc *tx_desc; in mlx4_en_handle_err_cqe() local
419 tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); in mlx4_en_handle_err_cqe()
420 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); in mlx4_en_handle_err_cqe()
690 static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, in build_inline_wqe() argument
[all …]
/openbmc/linux/drivers/net/ethernet/sunplus/
spl2sw_desc.c
37 if (!comm->tx_desc) in spl2sw_tx_descs_clean()
41 comm->tx_desc[i].cmd1 = 0; in spl2sw_tx_descs_clean()
43 comm->tx_desc[i].cmd2 = 0; in spl2sw_tx_descs_clean()
44 comm->tx_desc[i].addr1 = 0; in spl2sw_tx_descs_clean()
45 comm->tx_desc[i].addr2 = 0; in spl2sw_tx_descs_clean()
103 comm->tx_desc = NULL; in spl2sw_descs_free()
119 memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) * in spl2sw_tx_descs_init()
187 comm->tx_desc = comm->desc_base; in spl2sw_descs_alloc()
190 comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM]; in spl2sw_descs_alloc()
214 comm->tx_desc = NULL; in spl2sw_descs_init()
/openbmc/linux/drivers/infiniband/ulp/iser/
iser_initiator.c
144 struct iser_tx_desc *tx_desc, enum iser_desc_type type, in iser_create_send_desc() argument
149 tx_desc->type = type; in iser_create_send_desc()
150 tx_desc->cqe.done = done; in iser_create_send_desc()
153 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); in iser_create_send_desc()
155 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); in iser_create_send_desc()
156 tx_desc->iser_header.flags = ISER_VER; in iser_create_send_desc()
157 tx_desc->num_sge = 1; in iser_create_send_desc()
351 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_send_command() local
356 iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND, in iser_send_command()
397 err = iser_post_send(&iser_conn->ib_conn, tx_desc); in iser_send_command()
[all …]
iser_memory.c
253 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_reg_sig_mr() local
257 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_reg_sig_mr()
268 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_reg_sig_mr()
281 wr->wr.next = &tx_desc->send_wr; in iser_reg_sig_mr()
310 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_fast_reg_mr() local
313 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_fast_reg_mr()
317 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_fast_reg_mr()
328 wr->wr.next = &tx_desc->send_wr; in iser_fast_reg_mr()
iscsi_iser.c
187 * @tx_desc: iser tx descriptor
196 struct iser_tx_desc *tx_desc) in iser_initialize_task_headers() argument
206 dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc, in iser_initialize_task_headers()
211 tx_desc->inv_wr.next = NULL; in iser_initialize_task_headers()
212 tx_desc->reg_wr.wr.next = NULL; in iser_initialize_task_headers()
213 tx_desc->mapped = true; in iser_initialize_task_headers()
214 tx_desc->dma_addr = dma_addr; in iser_initialize_task_headers()
215 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; in iser_initialize_task_headers()
216 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; in iser_initialize_task_headers()
217 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; in iser_initialize_task_headers()
[all …]
/openbmc/qemu/hw/net/
npcm_gmac.c
517 struct NPCMGMACTxDesc tx_desc; in gmac_try_send_next_packet() local
534 if (gmac_read_tx_desc(desc_addr, &tx_desc)) { in gmac_try_send_next_packet()
544 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc, in gmac_try_send_next_packet()
545 tx_desc.tdes0, tx_desc.tdes1, tx_desc.tdes2, tx_desc.tdes3); in gmac_try_send_next_packet()
548 if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) { in gmac_try_send_next_packet()
561 tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN; in gmac_try_send_next_packet()
563 if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) { in gmac_try_send_next_packet()
564 csum = gmac_tx_get_csum(tx_desc.tdes1); in gmac_try_send_next_packet()
568 tx_buf_addr = tx_desc.tdes2; in gmac_try_send_next_packet()
570 tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1); in gmac_try_send_next_packet()
[all …]
npcm7xx_emc.c
306 const NPCM7xxEMCTxDesc *tx_desc, in emc_set_next_tx_descriptor() argument
310 if (emc_write_tx_desc(tx_desc, desc_addr)) { in emc_set_next_tx_descriptor()
317 emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa); in emc_set_next_tx_descriptor()
341 NPCM7xxEMCTxDesc tx_desc; in emc_try_send_next_packet() local
346 if (emc_read_tx_desc(desc_addr, &tx_desc)) { in emc_try_send_next_packet()
354 if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) { in emc_try_send_next_packet()
362 tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK; in emc_try_send_next_packet()
363 tx_desc.status_and_length &= 0xffff; in emc_try_send_next_packet()
371 next_buf_addr = tx_desc.txbsa; in emc_try_send_next_packet()
373 length = TX_DESC_PKT_LEN(tx_desc.status_and_length); in emc_try_send_next_packet()
[all …]
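
Both QEMU models above follow the same ownership handshake: the device only consumes a descriptor whose OWN flag is set (TX_DESC_TDES0_OWN, TX_DESC_FLAG_OWNER_MASK) and clears that flag to hand the descriptor back to the guest driver. A small C sketch of the protocol, with invented field and flag names:

```c
/* Toy model of the descriptor ownership handshake used by the GMAC/EMC
 * device models above. Field and flag names are invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_OWN (1u << 31)  /* driver sets it to hand the buffer to the device */

struct toy_tx_desc {
	uint32_t flags;
	uint32_t len;
};

/* Device side: transmit one descriptor if (and only if) the device owns it. */
static bool device_consume(struct toy_tx_desc *d)
{
	if (!(d->flags & DESC_OWN))
		return false;        /* still owned by the driver, nothing to do */

	printf("device: sending %u bytes\n", d->len);

	d->flags &= ~DESC_OWN;       /* return the descriptor to the driver */
	return true;
}

int main(void)
{
	struct toy_tx_desc d = { .flags = DESC_OWN, .len = 64 };

	device_consume(&d);  /* transmits and clears OWN */
	device_consume(&d);  /* no-op: descriptor already handed back */
	return 0;
}
```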
/openbmc/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
399 union ixgbe_adv_tx_desc *tx_desc = NULL; in ixgbe_xmit_zc() local
426 tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); in ixgbe_xmit_zc()
427 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_xmit_zc()
434 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_xmit_zc()
435 tx_desc->read.olinfo_status = in ixgbe_xmit_zc()
443 if (tx_desc) { in ixgbe_xmit_zc()
467 union ixgbe_adv_tx_desc *tx_desc; in ixgbe_clean_xdp_tx_irq() local
472 tx_desc = IXGBE_TX_DESC(tx_ring, ntc); in ixgbe_clean_xdp_tx_irq()
475 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbe_clean_xdp_tx_irq()
489 tx_desc++; in ixgbe_clean_xdp_tx_irq()
[all …]
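
ixgbe_clean_xdp_tx_irq() above stops reclaiming as soon as a write-back descriptor lacks the DD (descriptor done) status bit. A rough userspace sketch of that completion scan; the layout and names are illustrative, not the ixgbe structures.

```c
/* Simplified completion scan modeled on the DD-bit check above.
 * Names, widths and ring layout are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4u
#define STAT_DD   (1u << 0)  /* "descriptor done": hardware finished this entry */

struct toy_wb_desc {
	uint32_t status;         /* written back by the NIC on completion */
};

static unsigned int clean_tx(struct toy_wb_desc *ring, unsigned int *ntc)
{
	unsigned int cleaned = 0;

	/* Walk from next-to-clean until an entry the NIC has not finished. */
	while (cleaned < RING_SIZE && (ring[*ntc].status & STAT_DD)) {
		ring[*ntc].status = 0;                  /* recycle the entry */
		*ntc = (*ntc + 1) & (RING_SIZE - 1);
		cleaned++;
	}
	return cleaned;
}

int main(void)
{
	struct toy_wb_desc ring[RING_SIZE] = { { STAT_DD }, { STAT_DD }, { 0 }, { 0 } };
	unsigned int ntc = 0;

	printf("cleaned %u descriptors, next-to-clean is now %u\n",
	       clean_tx(ring, &ntc), ntc);
	return 0;
}
```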
/openbmc/linux/drivers/dma/ptdma/
ptdma-dmaengine.c
81 struct dma_async_tx_descriptor *tx_desc; in pt_handle_active_desc() local
94 tx_desc = &desc->vd.tx; in pt_handle_active_desc()
97 tx_desc = NULL; in pt_handle_active_desc()
107 dma_cookie_complete(tx_desc); in pt_handle_active_desc()
108 dma_descriptor_unmap(tx_desc); in pt_handle_active_desc()
112 tx_desc = NULL; in pt_handle_active_desc()
120 if (tx_desc) { in pt_handle_active_desc()
121 dmaengine_desc_get_callback_invoke(tx_desc, NULL); in pt_handle_active_desc()
122 dma_run_dependencies(tx_desc); in pt_handle_active_desc()
/openbmc/linux/drivers/net/ethernet/intel/ice/
ice_txrx_lib.c
258 struct ice_tx_desc *tx_desc; in ice_clean_xdp_irq() local
267 tx_desc = ICE_TX_DESC(xdp_ring, idx); in ice_clean_xdp_irq()
268 if (tx_desc->cmd_type_offset_bsz & in ice_clean_xdp_irq()
314 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_xdp_irq()
335 struct ice_tx_desc *tx_desc; in __ice_xmit_xdp_ring() local
358 tx_desc = ICE_TX_DESC(xdp_ring, ntu); in __ice_xmit_xdp_ring()
380 tx_desc->buf_addr = cpu_to_le64(dma); in __ice_xmit_xdp_ring()
381 tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); in __ice_xmit_xdp_ring()
390 tx_desc = ICE_TX_DESC(xdp_ring, ntu); in __ice_xmit_xdp_ring()
408 tx_desc->cmd_type_offset_bsz |= in __ice_xmit_xdp_ring()
ice_xsk.c
617 struct ice_tx_desc *tx_desc; in ice_clean_xdp_irq_zc() local
626 tx_desc = ICE_TX_DESC(xdp_ring, last_rs); in ice_clean_xdp_irq_zc()
627 if (tx_desc->cmd_type_offset_bsz & in ice_clean_xdp_irq_zc()
660 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_xdp_irq_zc()
688 struct ice_tx_desc *tx_desc; in ice_xmit_xdp_tx_zc() local
709 tx_desc = ICE_TX_DESC(xdp_ring, ntu); in ice_xmit_xdp_tx_zc()
721 tx_desc->buf_addr = cpu_to_le64(dma); in ice_xmit_xdp_tx_zc()
722 tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); in ice_xmit_xdp_tx_zc()
732 tx_desc = ICE_TX_DESC(xdp_ring, ntu); in ice_xmit_xdp_tx_zc()
742 tx_desc->cmd_type_offset_bsz |= in ice_xmit_xdp_tx_zc()
[all …]
ice_txrx.c
38 struct ice_tx_desc *tx_desc; in ice_prgm_fdir_fltr() local
74 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
84 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
91 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
100 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
223 struct ice_tx_desc *tx_desc; in ice_clean_tx_irq() local
230 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_tx_irq()
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
274 while (tx_desc != eop_desc) { in ice_clean_tx_irq()
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
[all …]
/openbmc/linux/drivers/net/wireless/realtek/rtw88/
tx.c
37 struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)skb->data; in rtw_tx_fill_tx_desc() local
43 tx_desc->w0 = le32_encode_bits(pkt_info->tx_pkt_size, RTW_TX_DESC_W0_TXPKTSIZE) | in rtw_tx_fill_tx_desc()
49 tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) | in rtw_tx_fill_tx_desc()
55 tx_desc->w2 = le32_encode_bits(pkt_info->ampdu_en, RTW_TX_DESC_W2_AGG_EN) | in rtw_tx_fill_tx_desc()
60 tx_desc->w3 = le32_encode_bits(pkt_info->hw_ssn_sel, RTW_TX_DESC_W3_HW_SSN_SEL) | in rtw_tx_fill_tx_desc()
67 tx_desc->w4 = le32_encode_bits(pkt_info->rate, RTW_TX_DESC_W4_DATARATE); in rtw_tx_fill_tx_desc()
69 tx_desc->w5 = le32_encode_bits(pkt_info->short_gi, RTW_TX_DESC_W5_DATA_SHORT) | in rtw_tx_fill_tx_desc()
74 tx_desc->w6 = le32_encode_bits(pkt_info->sn, RTW_TX_DESC_W6_SW_DEFINE); in rtw_tx_fill_tx_desc()
76 tx_desc->w8 = le32_encode_bits(pkt_info->en_hwseq, RTW_TX_DESC_W8_EN_HWSEQ); in rtw_tx_fill_tx_desc()
78 tx_desc->w9 = le32_encode_bits(pkt_info->seq, RTW_TX_DESC_W9_SW_SEQ); in rtw_tx_fill_tx_desc()
[all …]
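
rtw_tx_fill_tx_desc() above packs packet parameters into little-endian descriptor words with le32_encode_bits(). The sketch below shows the same field-packing idea in plain C with an explicit shift-and-mask helper; the field layout is made up, not the rtw88 one.

```c
/* Packing parameters into a 32-bit TX descriptor word, modeled on the
 * le32_encode_bits() calls above. The field layout here is invented. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout of "word 0" of a TX descriptor. */
#define W0_PKT_SIZE_SHIFT 0
#define W0_PKT_SIZE_MASK  0xffffu
#define W0_QSEL_SHIFT     16
#define W0_QSEL_MASK      0x1fu

static uint32_t encode_field(uint32_t value, unsigned int shift, uint32_t mask)
{
	return (value & mask) << shift;
}

int main(void)
{
	uint32_t w0 = encode_field(1500, W0_PKT_SIZE_SHIFT, W0_PKT_SIZE_MASK) |
		      encode_field(3, W0_QSEL_SHIFT, W0_QSEL_MASK);

	/* A real driver additionally stores the word little-endian
	 * (cpu_to_le32) before handing the descriptor to hardware. */
	printf("w0 = 0x%08x\n", w0);
	return 0;
}
```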
/openbmc/linux/drivers/net/ethernet/intel/fm10k/
fm10k_main.c
747 struct fm10k_tx_desc *tx_desc; in fm10k_tso() local
776 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
777 tx_desc->hdrlen = hdrlen; in fm10k_tso()
778 tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); in fm10k_tso()
794 struct fm10k_tx_desc *tx_desc; in fm10k_tx_csum() local
870 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
871 tx_desc->hdrlen = 0; in fm10k_tx_csum()
872 tx_desc->mss = 0; in fm10k_tx_csum()
893 struct fm10k_tx_desc *tx_desc, u16 i, in fm10k_tx_desc_push() argument
901 tx_desc->buffer_addr = cpu_to_le64(dma); in fm10k_tx_desc_push()
[all …]
/openbmc/linux/drivers/net/wireless/ath/ath12k/
dp_tx.c
80 struct ath12k_tx_desc_info *tx_desc, in ath12k_dp_tx_release_txbuf() argument
84 list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]); in ath12k_dp_tx_release_txbuf()
134 struct ath12k_tx_desc_info *tx_desc; in ath12k_dp_tx() local
177 tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id); in ath12k_dp_tx()
178 if (!tx_desc) in ath12k_dp_tx()
252 tx_desc->skb = skb; in ath12k_dp_tx()
253 tx_desc->mac_id = ar->pdev_idx; in ath12k_dp_tx()
254 ti.desc_id = tx_desc->desc_id; in ath12k_dp_tx()
340 ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); in ath12k_dp_tx()
538 struct ath12k_tx_desc_info *tx_desc = NULL; in ath12k_dp_tx_completion_handler() local
[all …]
/openbmc/linux/tools/testing/selftests/bpf/prog_tests/
xdp_metadata.c
92 printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr); in open_xsk()
141 struct xdp_desc *tx_desc; in generate_packet() local
153 tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx); in generate_packet()
154 tx_desc->addr = idx % (UMEM_NUM / 2) * UMEM_FRAME_SIZE; in generate_packet()
155 printf("%p: tx_desc[%u]->addr=%llx\n", xsk, idx, tx_desc->addr); in generate_packet()
156 data = xsk_umem__get_data(xsk->umem_area, tx_desc->addr); in generate_packet()
185 tx_desc->len = sizeof(*eth) + sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES; in generate_packet()
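
The selftest above fills an AF_XDP TX descriptor obtained from the producer ring (xsk_ring_prod__tx_desc), pointing it at a UMEM frame and setting its length. A hedged sketch of that sequence follows; it assumes libxdp's <xdp/xsk.h> (older trees carry the same helpers in a local xsk.h) and an already-configured socket and UMEM, both outside the scope of the sketch.

```c
/* Fill and submit one AF_XDP TX descriptor. Assumes the xsk_socket, TX ring
 * and UMEM area were set up elsewhere; error handling is minimal. */
#include <string.h>
#include <sys/socket.h>
#include <xdp/xsk.h>   /* libxdp helpers: xsk_ring_prod__*, xsk_umem__get_data */

#define FRAME_SIZE 2048

static int send_one_frame(struct xsk_socket *xsk, struct xsk_ring_prod *tx,
			  void *umem_area, const void *frame, __u32 len)
{
	__u32 idx;

	if (xsk_ring_prod__reserve(tx, 1, &idx) != 1)
		return -1;                        /* TX ring is currently full */

	struct xdp_desc *desc = xsk_ring_prod__tx_desc(tx, idx);
	desc->addr = (__u64)idx * FRAME_SIZE;     /* pick a UMEM frame for this slot */
	desc->len = len;

	memcpy(xsk_umem__get_data(umem_area, desc->addr), frame, len);

	xsk_ring_prod__submit(tx, 1);
	/* Kick the kernel in case the socket needs a wakeup to start sending. */
	sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	return 0;
}
```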
/openbmc/linux/drivers/net/ethernet/seeq/
sgiseeq.c
96 struct sgiseeq_tx_desc *tx_desc; member
196 sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; in seeq_init_ring()
197 dma_sync_desc_dev(dev, &sp->tx_desc[i]); in seeq_init_ring()
230 if (sp->tx_desc[i].skb) { in seeq_purge_ring()
231 dev_kfree_skb(sp->tx_desc[i].skb); in seeq_purge_ring()
232 sp->tx_desc[i].skb = NULL; in seeq_purge_ring()
253 struct sgiseeq_tx_desc *t = gpriv->tx_desc; in sgiseeq_dump_rings()
312 hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); in init_seeq()
443 td = &sp->tx_desc[i]; in kick_tx()
448 td = &sp->tx_desc[i]; in kick_tx()
[all …]
/openbmc/linux/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
537 struct i40e_tx_desc *tx_desc; in i40e_xmit_pkt() local
543 tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++); in i40e_xmit_pkt()
544 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_pkt()
545 tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0); in i40e_xmit_pkt()
554 struct i40e_tx_desc *tx_desc; in i40e_xmit_pkt_batch() local
564 tx_desc = I40E_TX_DESC(xdp_ring, ntu++); in i40e_xmit_pkt_batch()
565 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_pkt_batch()
566 tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0); in i40e_xmit_pkt_batch()
590 struct i40e_tx_desc *tx_desc; in i40e_set_rs_bit() local
592 tx_desc = I40E_TX_DESC(xdp_ring, ntu); in i40e_set_rs_bit()
[all …]
/openbmc/linux/drivers/staging/rtl8712/
rtl8712_xmit.c
251 struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf; in r8712_construct_txaggr_cmd_desc()
285 struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf; in r8712_append_mpdu_unit()
344 (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff); in r8712_xmitframe_aggr_next()
351 (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff); in r8712_xmitframe_aggr_next()
359 struct tx_desc *ptxdesc = pxmitbuf->pbuf; in r8712_dump_aggr_xframe()
404 struct tx_desc *ptxdesc = (struct tx_desc *)pmem; in update_txdesc()
412 struct tx_desc txdesc_mp; in update_txdesc()
414 memcpy(&txdesc_mp, ptxdesc, sizeof(struct tx_desc)); in update_txdesc()
415 memset(ptxdesc, 0, sizeof(struct tx_desc)); in update_txdesc()
536 struct tx_desc *ptxdesc_mp; in update_txdesc()
/openbmc/linux/drivers/net/ethernet/hisilicon/
hip04_eth.c
173 struct tx_desc { struct
226 struct tx_desc *tx_desc; member
451 struct tx_desc *desc; in hip04_tx_reclaim()
461 desc = &priv->tx_desc[tx_tail]; in hip04_tx_reclaim()
511 struct tx_desc *desc = &priv->tx_desc[tx_head]; in hip04_mac_start_xmit()
540 phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc); in hip04_mac_start_xmit()
542 offsetof(struct tx_desc, send_addr)); in hip04_mac_start_xmit()
859 priv->tx_desc = dma_alloc_coherent(d, in hip04_alloc_ring()
860 TX_DESC_NUM * sizeof(struct tx_desc), in hip04_alloc_ring()
862 if (!priv->tx_desc) in hip04_alloc_ring()
[all …]
