/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_VNET_H__
#define __MLX5_VNET_H__

#include "mlx5_vdpa.h"

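/*
 * Helpers to move between the embedded generic vdpa device, the mlx5 vdpa
 * device and the net device that wraps them (all via container_of()).
 */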
#define to_mlx5_vdpa_ndev(__mvdev)                                             \
	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)

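/*
 * Device objects created for the data path: transport domain (tdn), TIS
 * (tisn), TIR (tirn) and RQT (rqtn). @valid tracks whether the objects
 * currently exist; @tirn_dent is the TIR's debugfs entry.
 */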
struct mlx5_vdpa_net_resources {
	u32 tisn;
	u32 tdn;
	u32 tirn;
	u32 rqtn;
	bool valid;
	struct dentry *tirn_dent;
};

#define MLX5V_MACVLAN_SIZE 256

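/*
 * Entries in the MAC/VLAN hash table are looked up by a 64-bit key.
 * Judging by the decode below, the key is assumed to carry the MAC address
 * in its low 48 bits and the 12-bit 802.1Q VLAN ID in bits 48-59, e.g.
 * (hypothetical composition, not defined in this header):
 *
 *	u64 key = ((u64)vid << 48) | mac_as_u64;
 */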
static inline u16 key2vid(u64 key)
{
	return (u16)(key >> 48) & 0xfff;
}

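/*
 * Pool of device MSI-X vectors that can be handed out to virtqueues. Each
 * entry records the MSI vector mapping (msi_map), whether it is currently
 * in use, an interrupt name and the dev_id cookie associated with it.
 */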
#define MLX5_VDPA_IRQ_NAME_LEN 32

struct mlx5_vdpa_irq_pool_entry {
	struct msi_map map;
	bool used;
	char name[MLX5_VDPA_IRQ_NAME_LEN];
	void *dev_id;
};

struct mlx5_vdpa_irq_pool {
	int num_ent;
	struct mlx5_vdpa_irq_pool_entry *entries;
};

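/*
 * Main state of the mlx5 vDPA net device: the generic mlx5_vdpa_dev it
 * embeds, the virtio-net config space, the virtqueues and their event
 * callbacks, RX steering state and the debugfs hierarchy.
 */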
struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;
	struct mlx5_vdpa_net_resources res;
	struct virtio_net_config config;
	struct mlx5_vdpa_virtqueue *vqs;
	struct vdpa_callback *event_cbs;

	/* Serialize vq resources creation and destruction. This is required
	 * since the memory map might change and we need to destroy and create
	 * resources while the driver is operational.
	 */
	struct rw_semaphore reslock;
	struct mlx5_flow_table *rxft;
	struct dentry *rx_dent;
	struct dentry *rx_table_dent;
	bool setup;
	u32 cur_num_vqs;
	u32 rqt_size;
	bool nb_registered;
	struct notifier_block nb;
	struct vdpa_callback config_cb;
	struct mlx5_vdpa_wq_ent cvq_ent;
	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
	struct mlx5_vdpa_irq_pool irqp;
	struct dentry *debugfs;
};

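/*
 * A flow counter together with the debugfs entry exposing it; used for the
 * per-rule RX unicast/multicast counters below when steering debug is
 * enabled.
 */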
struct mlx5_vdpa_counter {
	struct mlx5_fc *counter;
	struct dentry *dent;
	struct mlx5_core_dev *mdev;
};

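/*
 * One entry in macvlan_hash: the unicast/multicast steering rules installed
 * for a MAC/VLAN combination. @macvlan holds the packed key (see key2vid()),
 * @tagged indicates whether a VLAN ID is part of the key.
 */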
struct macvlan_node {
	struct hlist_node hlist;
	struct mlx5_flow_handle *ucast_rule;
	struct mlx5_flow_handle *mcast_rule;
	u64 macvlan;
	struct mlx5_vdpa_net *ndev;
	bool tagged;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
	struct dentry *dent;
	struct mlx5_vdpa_counter ucast_counter;
	struct mlx5_vdpa_counter mcast_counter;
#endif
};

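/*
 * debugfs and RX steering hooks. The counter variants compile to empty
 * stubs when CONFIG_MLX5_VDPA_STEERING_DEBUG is not set.
 */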
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_tirn(struct mlx5_vdpa_net *ndev);
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
			       struct macvlan_node *node);
void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
				  struct macvlan_node *node);
#else
static inline void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
					     struct macvlan_node *node) {}
static inline void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
						struct macvlan_node *node) {}
#endif

#endif /* __MLX5_VNET_H__ */