xref: /openbmc/linux/net/sunrpc/xprtrdma/svc_rdma.c (revision 9cfc5c90)
1 /*
2  * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the BSD-type
8  * license below:
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  *      Redistributions of source code must retain the above copyright
15  *      notice, this list of conditions and the following disclaimer.
16  *
17  *      Redistributions in binary form must reproduce the above
18  *      copyright notice, this list of conditions and the following
19  *      disclaimer in the documentation and/or other materials provided
20  *      with the distribution.
21  *
22  *      Neither the name of the Network Appliance, Inc. nor the names of
23  *      its contributors may be used to endorse or promote products
24  *      derived from this software without specific prior written
25  *      permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  *
39  * Author: Tom Tucker <tom@opengridcomputing.com>
40  */
41 
42 #include <linux/slab.h>
43 #include <linux/fs.h>
44 #include <linux/sysctl.h>
45 #include <linux/workqueue.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/sunrpc/sched.h>
48 #include <linux/sunrpc/svc_rdma.h>
49 #include "xprt_rdma.h"
50 
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* RPC/RDMA tunable parameters, exposed under /proc/sys/sunrpc/svc_rdma.
 * The static min_ / max_ pairs bound each tunable via
 * proc_dointvec_minmax in svcrdma_parm_table below. */
unsigned int svcrdma_ord = RPCRDMA_ORD;		/* outbound read request limit */
static unsigned int min_ord = 1;
static unsigned int max_ord = 4096;
unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS;
static unsigned int min_max_requests = 4;
static unsigned int max_max_requests = 16384;
unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
static unsigned int min_max_inline = 4096;
static unsigned int max_max_inline = 65536;

/* Transport statistics. Each counter is exported as its own sysctl
 * file handled by read_reset_stat(): reading reports the value,
 * writing resets it to zero. */
atomic_t rdma_stat_recv;
atomic_t rdma_stat_read;
atomic_t rdma_stat_write;
atomic_t rdma_stat_sq_starve;
atomic_t rdma_stat_rq_starve;
atomic_t rdma_stat_rq_poll;
atomic_t rdma_stat_rq_prod;
atomic_t rdma_stat_sq_poll;
atomic_t rdma_stat_sq_prod;

/* Temporary NFS request map and context caches, created in
 * svc_rdma_init() and destroyed in svc_rdma_cleanup(). */
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;

/* Workqueue shared by the svcrdma transport machinery. */
struct workqueue_struct *svc_rdma_wq;
79 
80 /*
81  * This function implements reading and resetting an atomic_t stat
82  * variable through read/write to a proc file. Any write to the file
83  * resets the associated statistic to zero. Any read returns it's
84  * current value.
85  */
86 static int read_reset_stat(struct ctl_table *table, int write,
87 			   void __user *buffer, size_t *lenp,
88 			   loff_t *ppos)
89 {
90 	atomic_t *stat = (atomic_t *)table->data;
91 
92 	if (!stat)
93 		return -EINVAL;
94 
95 	if (write)
96 		atomic_set(stat, 0);
97 	else {
98 		char str_buf[32];
99 		char *data;
100 		int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat));
101 		if (len >= 32)
102 			return -EFAULT;
103 		len = strlen(str_buf);
104 		if (*ppos > len) {
105 			*lenp = 0;
106 			return 0;
107 		}
108 		data = &str_buf[*ppos];
109 		len -= *ppos;
110 		if (len > *lenp)
111 			len = *lenp;
112 		if (len && copy_to_user(buffer, str_buf, len))
113 			return -EFAULT;
114 		*lenp = len;
115 		*ppos += len;
116 	}
117 	return 0;
118 }
119 
/* Handle returned by register_sysctl_table(); NULL until registration. */
static struct ctl_table_header *svcrdma_table_header;

/* Files under /proc/sys/sunrpc/svc_rdma: three bounded tunables
 * followed by the read/reset statistic counters. */
static struct ctl_table svcrdma_parm_table[] = {
	{
		.procname	= "max_requests",
		.data		= &svcrdma_max_requests,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_max_requests,
		.extra2		= &max_max_requests
	},
	{
		.procname	= "max_req_size",
		.data		= &svcrdma_max_req_size,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_max_inline,
		.extra2		= &max_max_inline
	},
	{
		.procname	= "max_outbound_read_requests",
		.data		= &svcrdma_ord,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_ord,
		.extra2		= &max_ord,
	},

	/* Statistic counters: read reports the value, write resets it
	 * (see read_reset_stat). */
	{
		.procname	= "rdma_stat_read",
		.data		= &rdma_stat_read,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_recv",
		.data		= &rdma_stat_recv,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_write",
		.data		= &rdma_stat_write,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_sq_starve",
		.data		= &rdma_stat_sq_starve,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_rq_starve",
		.data		= &rdma_stat_rq_starve,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_rq_poll",
		.data		= &rdma_stat_rq_poll,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_rq_prod",
		.data		= &rdma_stat_rq_prod,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_sq_poll",
		.data		= &rdma_stat_sq_poll,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{
		.procname	= "rdma_stat_sq_prod",
		.data		= &rdma_stat_sq_prod,
		.maxlen		= sizeof(atomic_t),
		.mode		= 0644,
		.proc_handler	= read_reset_stat,
	},
	{ },	/* sentinel */
};
215 
/* Directory node: /proc/sys/sunrpc/svc_rdma */
static struct ctl_table svcrdma_table[] = {
	{
		.procname	= "svc_rdma",
		.mode		= 0555,
		.child		= svcrdma_parm_table
	},
	{ },	/* sentinel */
};
224 
/* Root of this module's sysctl tree: /proc/sys/sunrpc */
static struct ctl_table svcrdma_root_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= svcrdma_table
	},
	{ },	/* sentinel */
};
233 
/*
 * svc_rdma_cleanup - module exit: undo everything svc_rdma_init() set up.
 *
 * Teardown order mirrors init in reverse: stop the workqueue, remove the
 * sysctl tree, deregister the transport classes, then free the caches.
 */
void svc_rdma_cleanup(void)
{
	dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
	destroy_workqueue(svc_rdma_wq);
	/* Header may be NULL if registration failed during init. */
	if (svcrdma_table_header) {
		unregister_sysctl_table(svcrdma_table_header);
		svcrdma_table_header = NULL;
	}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	svc_unreg_xprt_class(&svc_rdma_bc_class);
#endif
	svc_unreg_xprt_class(&svc_rdma_class);
	/* kmem_cache_destroy() tolerates NULL, so no guards needed. */
	kmem_cache_destroy(svc_rdma_map_cachep);
	kmem_cache_destroy(svc_rdma_ctxt_cachep);
}
249 
250 int svc_rdma_init(void)
251 {
252 	dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
253 	dprintk("\tsvcrdma_ord      : %d\n", svcrdma_ord);
254 	dprintk("\tmax_requests     : %d\n", svcrdma_max_requests);
255 	dprintk("\tsq_depth         : %d\n",
256 		svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
257 	dprintk("\tmax_inline       : %d\n", svcrdma_max_req_size);
258 
259 	svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
260 	if (!svc_rdma_wq)
261 		return -ENOMEM;
262 
263 	if (!svcrdma_table_header)
264 		svcrdma_table_header =
265 			register_sysctl_table(svcrdma_root_table);
266 
267 	/* Create the temporary map cache */
268 	svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
269 						sizeof(struct svc_rdma_req_map),
270 						0,
271 						SLAB_HWCACHE_ALIGN,
272 						NULL);
273 	if (!svc_rdma_map_cachep) {
274 		printk(KERN_INFO "Could not allocate map cache.\n");
275 		goto err0;
276 	}
277 
278 	/* Create the temporary context cache */
279 	svc_rdma_ctxt_cachep =
280 		kmem_cache_create("svc_rdma_ctxt_cache",
281 				  sizeof(struct svc_rdma_op_ctxt),
282 				  0,
283 				  SLAB_HWCACHE_ALIGN,
284 				  NULL);
285 	if (!svc_rdma_ctxt_cachep) {
286 		printk(KERN_INFO "Could not allocate WR ctxt cache.\n");
287 		goto err1;
288 	}
289 
290 	/* Register RDMA with the SVC transport switch */
291 	svc_reg_xprt_class(&svc_rdma_class);
292 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
293 	svc_reg_xprt_class(&svc_rdma_bc_class);
294 #endif
295 	return 0;
296  err1:
297 	kmem_cache_destroy(svc_rdma_map_cachep);
298  err0:
299 	unregister_sysctl_table(svcrdma_table_header);
300 	destroy_workqueue(svc_rdma_wq);
301 	return -ENOMEM;
302 }
303