/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)

#define IS_VF(cdev)		((cdev)->b_is_vf)
#define IS_PF(cdev)		(!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;
};

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of VFs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

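/* Illustrative sketch (not part of the original header): the fields above
 * mirror the PCI SR-IOV extended capability, so a PF driver would typically
 * fill them from config space roughly as below. "pdev" and "iov" are
 * placeholder names used only for this example; the actual probing code
 * lives elsewhere in the driver.
 *
 *	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
 *
 *	if (pos) {
 *		iov->pos = pos;
 *		pci_read_config_dword(pdev, pos + PCI_SRIOV_CAP, &iov->cap);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
 *				     &iov->total_vfs);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET,
 *				     &iov->offset);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE,
 *				     &iov->stride);
 *		... and similarly for the remaining fields ...
 *	}
 */
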
/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

struct qed_vf_q_info {
	u16 fw_rx_qid;
	u16 fw_tx_qid;
	u8 fw_cid;
	u8 rxq_active;
	u8 txq_active;
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;
	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and is used only for PFs that have the
 * SR-IOV capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_events[QED_VF_ARRAY_LENGTH];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address continuously and split it to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief - Given a VF index, return the index of the next active VF
 *          (including that index).
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

/**
 * @brief Read SR-IOV related information and allocate resources;
 *        reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at the next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

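/* Illustrative sketch (not part of the original header): a caller builds a
 * TLV chain by repeatedly advancing an offset cursor through qed_add_tlv()
 * and terminating the chain with a list-end TLV; qed_dp_tlv_list() can then
 * dump the chain for debugging. struct channel_tlv, CHANNEL_TLV_LIST_END and
 * struct channel_list_end_tlv are assumed to come from qed_vf.h; "tlv_buf"
 * and CHANNEL_TLV_EXAMPLE are placeholders for this example only.
 *
 *	u8 *offset = (u8 *)tlv_buf;
 *	struct channel_tlv *tlv;
 *
 *	tlv = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_EXAMPLE, sizeof(*tlv));
 *	... fill the request body that follows the TLV header ...
 *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	qed_dp_tlv_list(p_hwfn, tlv_buf);
 */
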
/**
 * @brief qed_iov_alloc - allocate SR-IOV related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup SR-IOV related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief qed_iov_free - free SR-IOV related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free SR-IOV related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief qed_sriov_eqe_event - handle an async SR-IOV event that arrived
 *        on the event queue (EQE).
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data);

/**
 * @brief Mark structs of VFs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return 1 iff one of the PF's VFs got FLRed. 0 otherwise.
 */
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
				      u8 opcode,
				      __le16 echo, union event_ring_data *data)
{
	return -EINVAL;
}

static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				      u32 *disabled_vfs)
{
	return 0;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))

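/* Illustrative sketch (not part of the original header): iterating over all
 * currently active VFs of a PF. The direct vfs_array access below is only
 * for this example.
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *		... act on p_vf ...
 *	}
 */
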
#endif