/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		512

#define RDS_IB_DEFAULT_RETRY_COUNT	1

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};

/* This is the common structure for the IB private data exchange in setting up
 * an RDS connection.  The exchange is different for IPv4 and IPv6 connections.
 * The reason is that the address size is different and the addresses
 * exchanged are at the beginning of the structure.  Hence a single structure
 * cannot be shared by both address families without breaking interoperability.
 */
struct rds_ib_conn_priv_cmn {
	u8			ricpc_protocol_major;
	u8			ricpc_protocol_minor;
	__be16			ricpc_protocol_minor_mask;	/* bitmask */
	u8			ricpc_dp_toss;
	u8			ripc_reserved1;
	__be16			ripc_reserved2;
	__be64			ricpc_ack_seq;
	__be32			ricpc_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32				dp_saddr;
	__be32				dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr			dp_saddr;
	struct in6_addr			dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

#define dp_protocol_major	dp_cmn.ricpc_protocol_major
#define dp_protocol_minor	dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask	dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq		dp_cmn.ricpc_ack_seq
#define dp_credit		dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private	ricp_v4;
	struct rds6_ib_connect_private	ricp_v6;
};
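
/* Illustrative sketch (not part of the original header): the two variants put
 * different-sized addresses in front of the common block, so the private data
 * cannot be parsed without knowing the address family, e.g.:
 *
 *	const union rds_ib_conn_priv *dp = private_data;
 *	u8 major = isv6 ? dp->ricp_v6.dp_protocol_major
 *			: dp->ricp_v4.dp_protocol_major;
 *
 * where "private_data" and "isv6" stand in for whatever the CM handler is
 * given; see the isv6 argument to rds_ib_cm_handle_connect() below.
 */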

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

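/* Descriptive note (assumption based on how the ring helpers in ib_ring.c are
 * used, not stated in this header): w_alloc_ptr/w_alloc_ctr are advanced by
 * the path that posts work requests, while w_free_ptr/w_free_ctr account for
 * completed entries; w_free_ctr is an atomic_t because completions are
 * retired from the CQ handling path concurrently with new allocations.
 */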
struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;
	atomic_t		i_fastreg_inuse_count;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	**i_send_hdrs;
	dma_addr_t		*i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	**i_recv_hdrs;
	dma_addr_t		*i_recv_hdrs_dma;
	struct rds_ib_recv_work	*i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t		i_cache_allocs;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool			i_active_side;
	atomic_t		i_cq_quiesce;

	/* Send/Recv vectors */
	int			i_scq_vector;
	int			i_rcq_vector;
	u8			i_sl;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
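
/* Example (illustrative sketch, not part of the original header): the send
 * credits and the to-be-advertised post credits live in the two halves of a
 * single atomic_t, so both can be read or written in one atomic operation:
 *
 *	unsigned int v = atomic_read(&ic->i_credits);
 *	unsigned int send_credits = IB_GET_SEND_CREDITS(v);
 *	unsigned int post_credits = IB_GET_POST_CREDITS(v);
 *
 *	atomic_set(&ic->i_credits, IB_SET_SEND_CREDITS(send_credits) |
 *				   IB_SET_POST_CREDITS(post_credits));
 *
 * "ic" stands in for a struct rds_ib_connection pointer.
 */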

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u8			odp_capable:1;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	refcount_t		refcount;
	struct work_struct	free_work;
	int			*vector_load;
};

#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_total_frags;
	uint64_t	s_ib_rx_total_incs;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
	uint64_t	s_ib_recv_added_to_cache;
	uint64_t	s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					   sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
					      sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
		rds_stats_add_which(rds_ib_stats, member, count)
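
/* Example (illustrative only): the stats macros take a member name of
 * struct rds_ib_statistics directly, e.g.
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 *	rds_ib_stats_add(s_ib_rx_total_frags, nr_frags);
 *
 * where "nr_frags" is just a placeholder count.
 */
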
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif