--- ib_verbs.h	(2257e268b1154966c5b0141b23695db1043ff39d)
+++ ib_verbs.h	(62ede7779904bc75bdd84f1ff0016113956ce3b4)
 /*
  * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
--- 50 unchanged lines hidden ---
 
 #include <linux/if_link.h>
 #include <linux/atomic.h>
 #include <linux/mmu_notifier.h>
 #include <linux/uaccess.h>
 #include <linux/cgroup_rdma.h>
 #include <uapi/rdma/ib_user_verbs.h>
 
+#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
+
 extern struct workqueue_struct *ib_wq;
 extern struct workqueue_struct *ib_comp_wq;
 
 union ib_gid {
 	u8	raw[16];
 	struct {
 		__be64	subnet_prefix;
 		__be64	interface_id;
--- 469 unchanged lines hidden ---
 	enum ib_mtu		max_mtu;
 	enum ib_mtu		active_mtu;
 	int			gid_tbl_len;
 	u32			port_cap_flags;
 	u32			max_msg_sz;
 	u32			bad_pkey_cntr;
 	u32			qkey_viol_cntr;
 	u16			pkey_tbl_len;
-	u16			lid;
-	u16			sm_lid;
+	u32			sm_lid;
+	u32			lid;
 	u8			lmc;
 	u8			max_vl_num;
 	u8			sm_sl;
 	u8			subnet_timeout;
 	u8			init_type_reply;
 	u8			active_width;
 	u8			active_speed;
 	u8			phys_state;
--- 10 unchanged lines hidden ---
 struct ib_device_modify {
 	u64	sys_image_guid;
 	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
 };
 
 enum ib_port_modify_flags {
 	IB_PORT_SHUTDOWN	= 1,
 	IB_PORT_INIT_TYPE	= (1<<2),
-	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
+	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
+	IB_PORT_OPA_MASK_CHG	= (1<<4)
 };
 
 struct ib_port_modify {
 	u32	set_port_cap_mask;
 	u32	clr_port_cap_mask;
 	u8	init_type;
 };
 
--- 70 unchanged lines hidden ---
 	/* The IB spec states that if it's IPv4, the header
 	 * is located in the last 20 bytes of the header.
 	 */
 	u8		reserved[20];
 	struct iphdr	roce4grh;
 	};
 };
 
+#define IB_QPN_MASK		0xFFFFFF
+
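QP numbers are 24-bit values, so the new IB_QPN_MASK gives callers a named constant for stripping anything above bit 23. A minimal sketch of the intended use; the helper name is hypothetical, and the only header facts relied on are IB_QPN_MASK and the u32 src_qp field of struct ib_wc shown below:

```c
#include <rdma/ib_verbs.h>

/* Illustrative helper (not part of ib_verbs.h): reduce a completion's
 * source QP number to the 24 bits that are meaningful on the wire.
 */
static u32 example_wc_src_qpn(const struct ib_wc *wc)
{
	return wc->src_qp & IB_QPN_MASK;
}
```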
 enum {
 	IB_MULTICAST_QPN = 0xffffff
 };
 
 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 #define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
 
 enum ib_ah_flags {
--- 268 unchanged lines hidden ---
 	struct ib_qp	       *qp;
 	union {
 		__be32		imm_data;
 		u32		invalidate_rkey;
 	} ex;
 	u32			src_qp;
 	int			wc_flags;
 	u16			pkey_index;
-	u16			slid;
+	u32			slid;
 	u8			sl;
 	u8			dlid_path_bits;
 	u8			port_num;	/* valid only for DR SMPs on switches */
 	u8			smac[ETH_ALEN];
 	u16			vlan_id;
 	u8			network_hdr_type;
 };
 
--- 94 unchanged lines hidden ---
 	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
 	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
 	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
 	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
 	/* FREE					= 1 << 7, */
 	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
 	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
+	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
 	/* reserve bits 26-31 for low level drivers' internal use */
 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
 };
 
 /*
  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
  * callback to destroy the passed in QP.
--- 11 unchanged lines hidden ---
 	enum ib_qp_type		qp_type;
 	enum ib_qp_create_flags	create_flags;
 
 	/*
 	 * Only needed for special QP types, or when using the RW API.
 	 */
 	u8			port_num;
 	struct ib_rwq_ind_table *rwq_ind_tbl;
+	u32			source_qpn;
 };
 
 struct ib_qp_open_attr {
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	u32			qp_num;
 	enum ib_qp_type		qp_type;
 };
--- 444 unchanged lines hidden ---
 	 * completion is supported.
 	 */
 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
 	/* Scatter FCS field of an incoming packet to host memory is supported.
 	 */
 	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
 	/* Checksum offloads are supported (for both send and receive). */
 	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
+	/* When a packet is received for an RQ with no receive WQEs, the
+	 * packet processing is delayed.
+	 */
+	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
 };
 
 enum ib_wq_type {
 	IB_WQT_RQ
 };
 
 enum ib_wq_state {
 	IB_WQS_RESET,
--- 12 unchanged lines hidden ---
 	enum ib_wq_state	state;
 	enum ib_wq_type		wq_type;
 	atomic_t		usecnt;
 };
 
 enum ib_wq_flags {
 	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
 	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
+	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
 };
 
 struct ib_wq_init_attr {
 	void		       *wq_context;
 	enum ib_wq_type		wq_type;
 	u32			max_wr;
 	u32			max_sge;
 	struct ib_cq	       *cq;
--- 698 unchanged lines hidden ---
 	struct ib_device_attr		attrs;
 	struct attribute_group		*hw_stats_ag;
 	struct rdma_hw_stats		*hw_stats;
 
 #ifdef CONFIG_CGROUP_RDMA
 	struct rdmacg_device		cg_device;
 #endif
 
+	u32				index;
+
 	/**
 	 * The following mandatory functions are used only at device
 	 * registration. Keep functions such as these at the end of this
 	 * structure to avoid cache line misses when accessing struct ib_device
 	 * in fast paths.
 	 */
 	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
-	void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
+	void (*get_dev_fw_str)(struct ib_device *, char *str);
+	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
+						     int comp_vector);
 };
 
 struct ib_client {
 	char  *name;
 	void (*add)   (struct ib_device *);
 	void (*remove)(struct ib_device *, void *client_data);
 
 	/* Returns the net_dev belonging to this ib_client and matching the
--- 19 unchanged lines hidden ---
 					const struct sockaddr *addr,
 					void *client_data);
 	struct list_head list;
 };
 
 struct ib_device *ib_alloc_device(size_t size);
 void ib_dealloc_device(struct ib_device *device);
 
-void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
+void ib_get_device_fw_str(struct ib_device *device, char *str);
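With the size parameter dropped from both the driver callback and ib_get_device_fw_str(), the buffer length is implied by IB_FW_VERSION_NAME_MAX (defined near the top of this diff as ETHTOOL_FWVERS_LEN). A hedged sketch of what a provider-side callback could look like under that contract; the driver name and version numbers are placeholders, not anything from this header:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical provider callback: the core is expected to hand over a
 * buffer of at least IB_FW_VERSION_NAME_MAX bytes, so no explicit length
 * argument is passed any more.
 */
static void exampledrv_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	/* Placeholder version; a real driver would query its hardware. */
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d", 1, 2, 3);
}
```

A driver would then set `dev->get_dev_fw_str = exampledrv_get_dev_fw_str;` before calling ib_register_device(), and the sysfs/netlink consumers reach it through ib_get_device_fw_str().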
 
 int ib_register_device(struct ib_device *device,
 		       int (*port_callback)(struct ib_device *,
 					    u8, struct kobject *));
 void ib_unregister_device(struct ib_device *device);
 
 int ib_register_client   (struct ib_client *client);
 void ib_unregister_client(struct ib_client *client);
--- 1207 unchanged lines hidden ---
 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
 
 void ib_drain_rq(struct ib_qp *qp);
 void ib_drain_sq(struct ib_qp *qp);
 void ib_drain_qp(struct ib_qp *qp);
 
 int ib_resolve_eth_dmac(struct ib_device *device,
 			struct rdma_ah_attr *ah_attr);
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
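ib_get_eth_speed() lets RoCE providers derive the IB-style active_speed/active_width pair from the underlying Ethernet link instead of hard-coding values. A sketch of how a hypothetical driver's query_port() might use it; everything other than the ib_get_eth_speed() call and the struct ib_port_attr fields is illustrative:

```c
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hypothetical query_port() for a RoCE provider. */
static int exampledrv_query_port(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_attr *attr)
{
	int err;

	memset(attr, 0, sizeof(*attr));

	/* Fill active_speed/active_width from the netdev's reported link. */
	err = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
			       &attr->active_width);
	if (err)
		return err;

	/* Placeholder values for the remaining attributes. */
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = IB_MTU_1024;
	return 0;
}
```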
 
 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
 {
 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
 		return attr->roce.dmac;
 	return NULL;
 }
 
--- 135 unchanged lines hidden ---
 	    (rdma_protocol_iwarp(dev, port_num)))
 		return RDMA_AH_ATTR_TYPE_ROCE;
 	else if ((rdma_protocol_ib(dev, port_num)) &&
 		 (rdma_cap_opa_ah(dev, port_num)))
 		return RDMA_AH_ATTR_TYPE_OPA;
 	else
 		return RDMA_AH_ATTR_TYPE_IB;
 }
+
+/**
+ * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
+ *	In the current implementation the only way to
+ *	get the 32bit lid is from other sources for OPA.
+ *	For IB, lids will always be 16bits so cast the
+ *	value accordingly.
+ *
+ * @lid: A 32bit LID
+ */
+static inline u16 ib_lid_cpu16(u32 lid)
+{
+	WARN_ON_ONCE(lid & 0xFFFF0000);
+	return (u16)lid;
+}
+
+/**
+ * ib_lid_be16 - Return lid in 16bit BE encoding.
+ *
+ * @lid: A 32bit LID
+ */
+static inline __be16 ib_lid_be16(u32 lid)
+{
+	WARN_ON_ONCE(lid & 0xFFFF0000);
+	return cpu_to_be16((u16)lid);
+}
+
+/**
+ * ib_get_vector_affinity - Get the affinity mappings of a given completion
+ *   vector
+ * @device:         the rdma device
+ * @comp_vector:    index of completion vector
+ *
+ * Returns NULL on failure, otherwise a corresponding cpu map of the
+ * completion vector (returns all-cpus map if the device driver doesn't
+ * implement get_vector_affinity).
+ */
+static inline const struct cpumask *
+ib_get_vector_affinity(struct ib_device *device, int comp_vector)
+{
+	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
+	    !device->get_vector_affinity)
+		return NULL;
+
+	return device->get_vector_affinity(device, comp_vector);
+}
+
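The widened LID fields above (u32 lid, sm_lid and slid) and these new helpers are meant to be used together: code that still speaks 16-bit IB LIDs converts through ib_lid_cpu16()/ib_lid_be16(), and ULPs can ask where a completion vector's interrupts land before picking one. A usage sketch under those assumptions; the "prefer a vector near the current CPU" policy is purely illustrative, not something the header mandates:

```c
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <rdma/ib_verbs.h>

/* Sketch: report the port LID in its 16-bit encodings and pick a completion
 * vector whose affinity mask covers the CPU we are currently running on.
 */
static int example_pick_comp_vector(struct ib_device *dev, u8 port_num)
{
	struct ib_port_attr attr;
	int vec, err;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	/* attr.lid is u32 now; the helpers WARN if the top 16 bits are set. */
	pr_info("port %u: LID %u (wire %#x)\n", port_num,
		ib_lid_cpu16(attr.lid), be16_to_cpu(ib_lid_be16(attr.lid)));

	for (vec = 0; vec < dev->num_comp_vectors; vec++) {
		const struct cpumask *mask = ib_get_vector_affinity(dev, vec);

		if (mask && cpumask_test_cpu(raw_smp_processor_id(), mask))
			return vec;	/* vector serviced close to this CPU */
	}
	return 0;	/* driver gave no affinity hint; use vector 0 */
}
```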
 #endif /* IB_VERBS_H */
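The two DELAY_DROP additions above form a pair: IB_RAW_PACKET_CAP_DELAY_DROP advertises that the device can delay packet processing instead of dropping when a raw-packet RQ has no receive WQEs posted, and IB_WQ_FLAGS_DELAY_DROP requests that behaviour on a work queue. A hedged consumer sketch; it assumes ib_device_attr exposes raw_packet_caps and ib_wq_init_attr carries create_flags, as in kernels of this vintage, and leaves the pd/cq setup to the caller:

```c
#include <rdma/ib_verbs.h>

/* Sketch: create a receive WQ and opt in to delay-drop when the device
 * advertises support for it.
 */
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,		/* illustrative queue depth */
		.max_sge = 1,
		.cq	 = cq,
	};

	if (pd->device->attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)
		attr.create_flags |= IB_WQ_FLAGS_DELAY_DROP;

	return ib_create_wq(pd, &attr);
}
```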