/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)             ((cdev)->b_is_vf)
#define IS_PF(cdev)             (!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)             (0)
#define IS_PF(cdev)             (1)
#define IS_PF_SRIOV(p_hwfn)     (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
        (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
        QED_IOV_VP_UPDATE_ACTIVATE,
        QED_IOV_VP_UPDATE_VLAN_STRIP,
        QED_IOV_VP_UPDATE_TX_SWITCH,
        QED_IOV_VP_UPDATE_MCAST,
        QED_IOV_VP_UPDATE_ACCEPT_PARAM,
        QED_IOV_VP_UPDATE_RSS,
        QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
        QED_IOV_VP_UPDATE_SGE_TPA,
        QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
        /* These copies will later be reflected in the bulletin board,
         * but this copy should be newer.
         */
        u8 forced_mac[ETH_ALEN];
        u16 forced_vlan;
        u8 mac[ETH_ALEN];

        /* IFLA_VF_LINK_STATE_<X> */
        int link_state;

        /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
        int tx_rate;
};

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
        int pos;                /* capability position */
        int nres;               /* number of resources */
        u32 cap;                /* SR-IOV Capabilities */
        u16 ctrl;               /* SR-IOV Control */
        u16 total_vfs;          /* total VFs associated with the PF */
        u16 num_vfs;            /* number of VFs that have been started */
        u16 initial_vfs;        /* initial VFs associated with the PF */
        u16 nr_virtfn;          /* number of VFs available */
        u16 offset;             /* first VF Routing ID offset */
        u16 stride;             /* following VF stride */
        u16 vf_device_id;       /* VF device id */
        u32 pgsz;               /* page size for BAR alignment */
        u8 link;                /* Function Dependency Link */

        u32 first_vf_in_pf;
};
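/* Illustrative usage sketch (not part of the driver; the helper names are
 * hypothetical): the IS_VF()/IS_PF()/IS_PF_SRIOV() macros defined earlier in
 * this header compile down to constants when CONFIG_QED_SRIOV is disabled,
 * so PF-only SR-IOV paths can be guarded without extra #ifdefs:
 *
 *      if (IS_PF(p_hwfn->cdev) && IS_PF_SRIOV(p_hwfn))
 *              example_setup_vfs(p_hwfn);      (hypothetical helper)
 *      else if (IS_VF(p_hwfn->cdev))
 *              example_vf_init(p_hwfn);        (hypothetical helper)
 */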
/* This mailbox is maintained per VF in its PF and contains all information
 * required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
        union vfpf_tlvs *req_virt;
        dma_addr_t req_phys;
        union pfvf_tlvs *reply_virt;
        dma_addr_t reply_phys;

        /* Address in VF where a pending message is located */
        dma_addr_t pending_req;

        u8 *offset;

        /* saved VF request header */
        struct vfpf_first_tlv first_tlv;
};

struct qed_vf_q_info {
        u16 fw_rx_qid;
        u16 fw_tx_qid;
        u8 fw_cid;
        u8 rxq_active;
        u8 txq_active;
};

enum vf_state {
        VF_FREE = 0,            /* VF ready to be acquired; holds no resources */
        VF_ACQUIRED,            /* VF, acquired, but not initialized */
        VF_ENABLED,             /* VF, Enabled */
        VF_RESET,               /* VF, FLR'd, pending cleanup */
        VF_STOPPED              /* VF, Stopped */
};

struct qed_vf_vlan_shadow {
        bool used;
        u16 vid;
};

struct qed_vf_shadow_config {
        /* Shadow copy of all guest vlans */
        struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

        /* Shadow copy of all configured MACs; Empty if forcing MACs */
        u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
        u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
        struct qed_iov_vf_mbx vf_mbx;
        enum vf_state state;
        bool b_init;
        u8 to_disable;

        struct qed_bulletin bulletin;
        dma_addr_t vf_bulletin;

        /* PF saves a copy of the last VF acquire message */
        struct vfpf_acquire_tlv acquire;

        u32 concrete_fid;
        u16 opaque_fid;
        u16 mtu;

        u8 vport_id;
        u8 relative_vf_id;
        u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)     (QED_PATH_ID(p_hwfn) ?                \
                                         (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
                                         (p_vf)->abs_vf_id)

        u8 vport_instance;
        u8 num_rxqs;
        u8 num_txqs;

        u8 num_sbs;

        u8 num_mac_filters;
        u8 num_vlan_filters;
        struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
        u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
        u8 num_active_rxqs;
        struct qed_public_vf_info p_vf_info;
        bool spoof_chk;
        bool req_spoofchk_val;

        /* Stores the configuration requested by VF */
        struct qed_vf_shadow_config shadow_config;

        /* A bitfield using bulletin's valid-map bits, used to indicate
         * which of the bulletin board features have been configured.
         */
        u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
                                                 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
        struct qed_vf_info vfs_array[MAX_NUM_VFS];
        u64 pending_events[QED_VF_ARRAY_LENGTH];
        u64 pending_flr[QED_VF_ARRAY_LENGTH];

        /* Allocate message address continuously and split to each VF */
        void *mbx_msg_virt_addr;
        dma_addr_t mbx_msg_phys_addr;
        u32 mbx_msg_size;
        void *mbx_reply_virt_addr;
        dma_addr_t mbx_reply_phys_addr;
        u32 mbx_reply_size;
        void *p_bulletins;
        dma_addr_t bulletins_phys;
        u32 bulletins_size;
};

enum qed_iov_wq_flag {
        QED_IOV_WQ_MSG_FLAG,
        QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
        QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
        QED_IOV_WQ_STOP_WQ_FLAG,
        QED_IOV_WQ_FLR_FLAG,
};
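/* Illustrative sketch (an assumption about intended use, not taken from the
 * driver source): pending_events/pending_flr in struct qed_pf_iov act as a
 * bitmap over relative VF ids, one bit per VF spread across
 * QED_VF_ARRAY_LENGTH 64-bit words. Marking a VF would look roughly like
 * this; the helper name is hypothetical.
 *
 *      static void example_mark_vf_flr(struct qed_pf_iov *p_iov, u8 rel_vf_id)
 *      {
 *              p_iov->pending_flr[rel_vf_id / 64] |= BIT_ULL(rel_vf_id % 64);
 *      }
 */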
#ifdef CONFIG_QED_SRIOV
/**
 * @brief - Given a VF index, return index of next [including that] active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

/**
 * @brief Read sriov related information and allocate resources;
 * reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
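/* Illustrative sketch of the TLV helpers above (hypothetical usage, not
 * lifted from the driver source): TLVs are laid out back-to-back in a
 * mailbox buffer. qed_add_tlv() writes a TLV at *offset, advances *offset
 * by 'length' and returns a pointer to the newly placed TLV, so consecutive
 * calls append one TLV after another. The TLV type values and reply layout
 * assumed here come from qed_vf.h.
 *
 *      u8 *offset = (u8 *)mbx->reply_virt;
 *      struct pfvf_def_resp_tlv *resp;
 *
 *      resp = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE,
 *                         sizeof(*resp));
 *      qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *                  sizeof(struct channel_list_end_tlv));
 *      qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
 */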
/**
 * @brief qed_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief qed_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief qed_sriov_eqe_event - handle an async sriov event arriving on the eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                        u8 opcode, __le16 echo, union event_ring_data *data);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
 */
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
                               void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
                                             u16 rel_vf_id)
{
        return MAX_NUM_VFS;
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                                      u8 opcode,
                                      __le16 echo, union event_ring_data *data)
{
        return -EINVAL;
}

static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
                                      u32 *disabled_vfs)
{
        return 0;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
        return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
                                    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
        return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)                      \
        for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
             _i < MAX_NUM_VFS;                            \
             _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))

#endif
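/* Illustrative usage sketch of qed_for_each_vf() above (hypothetical, not
 * from the driver source): it iterates only over VFs the PF considers
 * active, e.g.
 *
 *      int i;
 *
 *      qed_for_each_vf(p_hwfn, i) {
 *              struct qed_vf_info *p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *              example_handle_vf(p_hwfn, p_vf);        (hypothetical helper)
 *      }
 */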