/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019-2021, Intel Corporation. */

#ifndef _ICE_ESWITCH_H_
#define _ICE_ESWITCH_H_

#include <net/devlink.h>

#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_release(struct ice_pf *pf);
int ice_eswitch_configure(struct ice_pf *pf);
int ice_eswitch_rebuild(struct ice_pf *pf);

int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);

void ice_eswitch_update_repr(struct ice_vsi *vsi);

void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);

void ice_eswitch_set_target_vsi(struct sk_buff *skb,
				struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
#else /* CONFIG_ICE_SWITCHDEV */
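/* Stub implementations used when the driver is built without
 * CONFIG_ICE_SWITCHDEV: eswitch setup and representor handling become
 * no-ops, mode queries report legacy mode, and switchdev-only operations
 * return -EOPNOTSUPP or NETDEV_TX_BUSY.
 */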
static inline void ice_eswitch_release(struct ice_pf *pf) { }

static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }

static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off) { }

static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }

static inline int ice_eswitch_configure(struct ice_pf *pf)
{
	return 0;
}

static inline int ice_eswitch_rebuild(struct ice_pf *pf)
{
	return -EOPNOTSUPP;
}

static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	return DEVLINK_ESWITCH_MODE_LEGACY;
}

static inline int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return false;
}

static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	return NETDEV_TX_BUSY;
}
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */