/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)		((cdev)->b_is_vf)
#define IS_PF(cdev)		(!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)		(0)
#define IS_PF(cdev)		(1)
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently we don't support a
	 * different number of Rx/Tx queues.
	 */

	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate the message addresses contiguously and split to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Check if given VF ID @rel_vf_id is valid
 *        w.r.t. @b_enabled_only value;
 *        if b_enabled_only = true - only an enabled VF id is valid,
 *        else any VF id less than max_vfs is valid.
 *
 * @param p_hwfn
 * @param rel_vf_id - Relative VF ID
 * @param b_enabled_only - consider only enabled VFs
 * @param b_non_malicious - true iff we want to validate the VF is not malicious.
 *
 * @return bool - true for valid VF ID
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);

/**
 * @brief - Given a VF index, return index of next [including that] active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * @brief Read SR-IOV related information and allocated resources
 *        from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param p_hwfn
 * @param p_iov
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * @brief qed_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return true iff one of the PF's vfs got FLRed. false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))

#endif
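
/* Usage sketch (illustrative only, not part of the driver): a minimal example
 * of iterating the currently active VFs of a PF with the qed_for_each_vf()
 * iterator declared above. The wrapper function name below is hypothetical;
 * everything it uses is declared in this header. When CONFIG_QED_SRIOV is not
 * set, qed_iov_get_next_active_vf() returns MAX_NUM_VFS, so the loop body
 * never runs and the sketch simply returns 0.
 *
 *	static u16 example_count_active_vfs(struct qed_hwfn *p_hwfn)
 *	{
 *		u16 i, cnt = 0;
 *
 *		qed_for_each_vf(p_hwfn, i)
 *			cnt++;
 *
 *		return cnt;
 *	}
 */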