/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_set_rx_bufs_act - propagate Rx buffer action to frags
 * @xdp: XDP buffer representing frame (linear and frags part)
 * @rx_ring: Rx ring struct
 * @act: action to store onto Rx buffers related to XDP buffer parts
 *
 * Set the action that should be taken before putting Rx buffers, from the
 * first frag through the last.
 */
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
		    const unsigned int act)
{
	u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
	u32 nr_frags = rx_ring->nr_frags + 1;
	u32 idx = rx_ring->first_desc;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;

	for (int i = 0; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[idx];
		buf->act = act;

		if (++idx == cnt)
			idx = 0;
	}

	/* Adjust pagecnt_bias on frags freed by the XDP prog: when the
	 * program shrank the frame (e.g. via bpf_xdp_adjust_tail()), the
	 * trailing frag pages were already released by the XDP core, so
	 * drop one driver-held reference from the bias of each such buffer.
	 */
	if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
		u32 delta = rx_ring->nr_frags - sinfo_frags;

		while (delta) {
			if (idx == 0)
				idx = cnt - 1;
			else
				idx--;
			buf = &rx_ring->rx_buf[idx];
			buf->pagecnt_bias--;
			delta--;
		}
	}
}

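/* A minimal usage sketch: once the XDP verdict has been translated into one
 * of the driver's ICE_XDP_* results, it is recorded on every buffer backing
 * the frame, e.g.:
 *
 *	ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
 *
 * ICE_XDP_CONSUMED is referenced in the function body above; the exact call
 * site in the Rx path is an assumption here, as it is not part of this header.
 */
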
/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * Returns true if any of @stat_err_bits are set in @status_err_n. Only the
 * mask is converted to little endian, so the descriptor field itself never
 * needs byte-swapping; no shifting is needed either, because the status
 * bits begin at offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}

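/* Because only the mask is byte-swapped, callers pass plain host-order
 * BIT() values, as the helpers below do, e.g.:
 *
 *	ice_test_staterr(rx_desc->wb.status_error0,
 *			 BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S));
 */
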
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false;
 * otherwise it returns true, indicating that this is in fact a non-EOP
 * buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}

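/**
 * ice_build_ctob - build the cmd/type/offset/buf-size Tx descriptor quadword
 * @td_cmd: command bits for the descriptor
 * @td_offset: packed header-offset bits for the descriptor
 * @size: size of the associated Tx buffer in bytes
 * @td_tag: L2TAG1 value to write (typically a VLAN tag)
 *
 * Returns the little-endian QW1 value with the data descriptor type
 * (ICE_TX_DESC_DTYPE_DATA) set and each argument shifted into its field
 * per the ICE_TXD_QW1_* definitions.
 */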
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}

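/* A minimal usage sketch (hypothetical values): filling QW1 for a single
 * buffer that ends the frame and requests descriptor write-back:
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		ice_build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS,
 *			       0, size, 0);
 *
 * ICE_TX_DESC_CMD_EOP and ICE_TX_DESC_CMD_RS are command bits from
 * ice_lan_tx_rx.h; the zero td_offset/td_tag assume no offloads and no VLAN.
 */
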
/**
 * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN tag
 * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
 * one is found return the tag, else return 0 to mean no VLAN tag was found.
 */
static inline u16
ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}

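/* Typical hand-off (a sketch; the real call site lives in the .c files):
 *
 *	vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
 *	ice_receive_skb(rx_ring, skb, vlan_tag);
 *
 * ice_receive_skb() is declared at the bottom of this header.
 */
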
/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * Notify hardware of newly produced descriptors by writing next_to_use
 * to the XDP Tx ring tail register.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. The explicit
	 * wmb() provides that ordering, which is why the tail
	 * write itself can be writel_relaxed() instead of writel().
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns the index of the descriptor that the RS bit was set on.
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}

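/* Requesting write-back (RS) on only the last descriptor of a batch, rather
 * than on every descriptor, limits completion traffic to one write-back per
 * batch. The returned index is presumably what the Tx cleaning side later
 * checks for the DD bit; that flow lives in the .c files, not in this header.
 */
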
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */