/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of hardware indirection table entries must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
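
/* Being a power of 2 lets a packet's RSS hash be reduced to a table slot with
 * a simple mask rather than a modulo, e.g.:
 *
 *   slot = rss_hash & MANA_INDIRECT_TABLE_MASK;
 */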

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Only set, and must be non-NULL, if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Only set, and must be non-NULL, if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
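
/* Illustrative derivation: with an XDP program attached, an RX buffer is a
 * single page, which must hold the XDP headroom at the front, the Ethernet
 * header plus MTU-sized payload in the middle, and the skb_shared_info area
 * at the tail. Hence the largest MTU usable with XDP is:
 *
 *   MANA_XDP_MTU_MAX = PAGE_SIZE - XDP_PACKET_HEADROOM
 *                      - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *                      - ETH_HLEN
 *
 * On a typical 64-bit build with 4K pages this works out to roughly 3.5KB.
 */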

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes. */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex serializing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
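
/* Illustrative sketch only, not part of the driver API: shows how one of the
 * request/response pairs above is exchanged over the HW channel, assuming the
 * gdma.h helpers mana_gd_init_req_hdr() and mana_gd_send_request() with their
 * usual signatures. The real driver additionally logs errors and validates
 * the response header more thoroughly.
 */
static inline int mana_example_fence_rq(struct gdma_context *gc,
					mana_handle_t rq_obj)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	/* Fill in the common request header: command code and message sizes. */
	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, sizeof(req),
			     sizeof(resp));
	req.wq_obj_handle = rq_obj;

	/* Post the request and wait for the matching response. */
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err)
		return err;

	/* A non-zero status in the response header indicates a HW error. */
	return resp.hdr.status ? -EPROTO : 0;
}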

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */