xref: /openbmc/linux/drivers/infiniband/core/mad.c (revision e2f1cf25)
1 /*
2  * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
5  * Copyright (c) 2009 HNR Consulting. All rights reserved.
6  * Copyright (c) 2014 Intel Corporation.  All rights reserved.
7  *
8  * This software is available to you under a choice of one of two
9  * licenses.  You may choose to be licensed under the terms of the GNU
10  * General Public License (GPL) Version 2, available from the file
11  * COPYING in the main directory of this source tree, or the
12  * OpenIB.org BSD license below:
13  *
14  *     Redistribution and use in source and binary forms, with or
15  *     without modification, are permitted provided that the following
16  *     conditions are met:
17  *
18  *      - Redistributions of source code must retain the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer.
21  *
22  *      - Redistributions in binary form must reproduce the above
23  *        copyright notice, this list of conditions and the following
24  *        disclaimer in the documentation and/or other materials
25  *        provided with the distribution.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34  * SOFTWARE.
35  *
36  */
37 
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39 
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <rdma/ib_cache.h>
44 
45 #include "mad_priv.h"
46 #include "mad_rmpp.h"
47 #include "smi.h"
48 #include "opa_smi.h"
49 #include "agent.h"
50 
51 MODULE_LICENSE("Dual BSD/GPL");
52 MODULE_DESCRIPTION("kernel IB MAD API");
53 MODULE_AUTHOR("Hal Rosenstock");
54 MODULE_AUTHOR("Sean Hefty");
55 
56 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
57 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
58 
59 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
60 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
61 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
62 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
63 
64 static struct list_head ib_mad_port_list;
65 static u32 ib_mad_client_id;
66 
67 /* Port list lock */
68 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
69 
70 /* Forward declarations */
71 static int method_in_use(struct ib_mad_mgmt_method_table **method,
72 			 struct ib_mad_reg_req *mad_reg_req);
73 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
74 static struct ib_mad_agent_private *find_mad_agent(
75 					struct ib_mad_port_private *port_priv,
76 					const struct ib_mad_hdr *mad);
77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
78 				    struct ib_mad_private *mad);
79 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
80 static void timeout_sends(struct work_struct *work);
81 static void local_completions(struct work_struct *work);
82 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83 			      struct ib_mad_agent_private *agent_priv,
84 			      u8 mgmt_class);
85 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 			   struct ib_mad_agent_private *agent_priv);
87 
88 /*
89  * Returns an ib_mad_port_private structure or NULL for a device/port.
90  * Assumes ib_mad_port_list_lock is held.
91  */
92 static inline struct ib_mad_port_private *
93 __ib_get_mad_port(struct ib_device *device, int port_num)
94 {
95 	struct ib_mad_port_private *entry;
96 
97 	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
98 		if (entry->device == device && entry->port_num == port_num)
99 			return entry;
100 	}
101 	return NULL;
102 }
103 
104 /*
105  * Wrapper function to return an ib_mad_port_private structure or NULL
106  * for a device/port
107  */
108 static inline struct ib_mad_port_private *
109 ib_get_mad_port(struct ib_device *device, int port_num)
110 {
111 	struct ib_mad_port_private *entry;
112 	unsigned long flags;
113 
114 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
115 	entry = __ib_get_mad_port(device, port_num);
116 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
117 
118 	return entry;
119 }
120 
121 static inline u8 convert_mgmt_class(u8 mgmt_class)
122 {
123 	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
124 	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
125 		0 : mgmt_class;
126 }
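
/*
 * Editorial note: directed-route SMPs (class 0x81) reuse the otherwise
 * reserved class value 0 here, so the class table stays bounded by
 * MAX_MGMT_CLASS; e.g. convert_mgmt_class(IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 * returns 0 while every other class maps to itself.
 */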
127 
128 static int get_spl_qp_index(enum ib_qp_type qp_type)
129 {
130 	switch (qp_type)
131 	{
132 	case IB_QPT_SMI:
133 		return 0;
134 	case IB_QPT_GSI:
135 		return 1;
136 	default:
137 		return -1;
138 	}
139 }
140 
141 static int vendor_class_index(u8 mgmt_class)
142 {
143 	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
144 }
145 
146 static int is_vendor_class(u8 mgmt_class)
147 {
148 	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
149 	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
150 		return 0;
151 	return 1;
152 }
153 
154 static int is_vendor_oui(char *oui)
155 {
156 	if (oui[0] || oui[1] || oui[2])
157 		return 1;
158 	return 0;
159 }
160 
161 static int is_vendor_method_in_use(
162 		struct ib_mad_mgmt_vendor_class *vendor_class,
163 		struct ib_mad_reg_req *mad_reg_req)
164 {
165 	struct ib_mad_mgmt_method_table *method;
166 	int i;
167 
168 	for (i = 0; i < MAX_MGMT_OUI; i++) {
169 		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
170 			method = vendor_class->method_table[i];
171 			if (method) {
172 				if (method_in_use(&method, mad_reg_req))
173 					return 1;
174 				else
175 					break;
176 			}
177 		}
178 	}
179 	return 0;
180 }
181 
182 int ib_response_mad(const struct ib_mad_hdr *hdr)
183 {
184 	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
185 		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
186 		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
187 		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
188 }
189 EXPORT_SYMBOL(ib_response_mad);
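
/*
 * Editorial example (not in the original file): a minimal sketch of how a
 * receive path might use ib_response_mad() to split solicited responses
 * from unsolicited requests.  The handler and both helpers are hypothetical.
 *
 *	static void example_recv(const struct ib_mad_hdr *hdr)
 *	{
 *		if (ib_response_mad(hdr))
 *			example_match_response(hdr);	// route by TID
 *		else
 *			example_dispatch_request(hdr);	// route by class/method
 *	}
 */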
190 
191 /*
192  * ib_register_mad_agent - Register to send/receive MADs
193  */
194 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
195 					   u8 port_num,
196 					   enum ib_qp_type qp_type,
197 					   struct ib_mad_reg_req *mad_reg_req,
198 					   u8 rmpp_version,
199 					   ib_mad_send_handler send_handler,
200 					   ib_mad_recv_handler recv_handler,
201 					   void *context,
202 					   u32 registration_flags)
203 {
204 	struct ib_mad_port_private *port_priv;
205 	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
206 	struct ib_mad_agent_private *mad_agent_priv;
207 	struct ib_mad_reg_req *reg_req = NULL;
208 	struct ib_mad_mgmt_class_table *class;
209 	struct ib_mad_mgmt_vendor_class_table *vendor;
210 	struct ib_mad_mgmt_vendor_class *vendor_class;
211 	struct ib_mad_mgmt_method_table *method;
212 	int ret2, qpn;
213 	unsigned long flags;
214 	u8 mgmt_class, vclass;
215 
216 	/* Validate parameters */
217 	qpn = get_spl_qp_index(qp_type);
218 	if (qpn == -1) {
219 		dev_notice(&device->dev,
220 			   "ib_register_mad_agent: invalid QP Type %d\n",
221 			   qp_type);
222 		goto error1;
223 	}
224 
225 	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
226 		dev_notice(&device->dev,
227 			   "ib_register_mad_agent: invalid RMPP Version %u\n",
228 			   rmpp_version);
229 		goto error1;
230 	}
231 
232 	/* Validate MAD registration request if supplied */
233 	if (mad_reg_req) {
234 		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
235 			dev_notice(&device->dev,
236 				   "ib_register_mad_agent: invalid Class Version %u\n",
237 				   mad_reg_req->mgmt_class_version);
238 			goto error1;
239 		}
240 		if (!recv_handler) {
241 			dev_notice(&device->dev,
242 				   "ib_register_mad_agent: no recv_handler\n");
243 			goto error1;
244 		}
245 		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
246 			/*
247 			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
248 			 * one in this range currently allowed
249 			 */
250 			if (mad_reg_req->mgmt_class !=
251 			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
252 				dev_notice(&device->dev,
253 					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
254 					   mad_reg_req->mgmt_class);
255 				goto error1;
256 			}
257 		} else if (mad_reg_req->mgmt_class == 0) {
258 			/*
259 			 * Class 0 is reserved in IBA and is used for
260 			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
261 			 */
262 			dev_notice(&device->dev,
263 				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
264 			goto error1;
265 		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
266 			/*
267 			 * If class is in "new" vendor range,
268 			 * ensure supplied OUI is not zero
269 			 */
270 			if (!is_vendor_oui(mad_reg_req->oui)) {
271 				dev_notice(&device->dev,
272 					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
273 					   mad_reg_req->mgmt_class);
274 				goto error1;
275 			}
276 		}
277 		/* Make sure class supplied is consistent with RMPP */
278 		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
279 			if (rmpp_version) {
280 				dev_notice(&device->dev,
281 					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
282 					   mad_reg_req->mgmt_class);
283 				goto error1;
284 			}
285 		}
286 
287 		/* Make sure class supplied is consistent with QP type */
288 		if (qp_type == IB_QPT_SMI) {
289 			if ((mad_reg_req->mgmt_class !=
290 					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
291 			    (mad_reg_req->mgmt_class !=
292 					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
293 				dev_notice(&device->dev,
294 					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
295 					   mad_reg_req->mgmt_class);
296 				goto error1;
297 			}
298 		} else {
299 			if ((mad_reg_req->mgmt_class ==
300 					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
301 			    (mad_reg_req->mgmt_class ==
302 					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
303 				dev_notice(&device->dev,
304 					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
305 					   mad_reg_req->mgmt_class);
306 				goto error1;
307 			}
308 		}
309 	} else {
310 		/* No registration request supplied */
311 		if (!send_handler)
312 			goto error1;
313 		if (registration_flags & IB_MAD_USER_RMPP)
314 			goto error1;
315 	}
316 
317 	/* Validate device and port */
318 	port_priv = ib_get_mad_port(device, port_num);
319 	if (!port_priv) {
320 		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
321 		ret = ERR_PTR(-ENODEV);
322 		goto error1;
323 	}
324 
325 	/* Verify the QP requested is supported.  For example, Ethernet devices
326 	 * will not have QP0 */
327 	if (!port_priv->qp_info[qpn].qp) {
328 		dev_notice(&device->dev,
329 			   "ib_register_mad_agent: QP %d not supported\n", qpn);
330 		ret = ERR_PTR(-EPROTONOSUPPORT);
331 		goto error1;
332 	}
333 
334 	/* Allocate structures */
335 	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
336 	if (!mad_agent_priv) {
337 		ret = ERR_PTR(-ENOMEM);
338 		goto error1;
339 	}
340 
341 	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
342 						 IB_ACCESS_LOCAL_WRITE);
343 	if (IS_ERR(mad_agent_priv->agent.mr)) {
344 		ret = ERR_PTR(-ENOMEM);
345 		goto error2;
346 	}
347 
348 	if (mad_reg_req) {
349 		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
350 		if (!reg_req) {
351 			ret = ERR_PTR(-ENOMEM);
352 			goto error3;
353 		}
354 	}
355 
356 	/* Now, fill in the various structures */
357 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
358 	mad_agent_priv->reg_req = reg_req;
359 	mad_agent_priv->agent.rmpp_version = rmpp_version;
360 	mad_agent_priv->agent.device = device;
361 	mad_agent_priv->agent.recv_handler = recv_handler;
362 	mad_agent_priv->agent.send_handler = send_handler;
363 	mad_agent_priv->agent.context = context;
364 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
365 	mad_agent_priv->agent.port_num = port_num;
366 	mad_agent_priv->agent.flags = registration_flags;
367 	spin_lock_init(&mad_agent_priv->lock);
368 	INIT_LIST_HEAD(&mad_agent_priv->send_list);
369 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
370 	INIT_LIST_HEAD(&mad_agent_priv->done_list);
371 	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
372 	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
373 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
374 	INIT_WORK(&mad_agent_priv->local_work, local_completions);
375 	atomic_set(&mad_agent_priv->refcount, 1);
376 	init_completion(&mad_agent_priv->comp);
377 
378 	spin_lock_irqsave(&port_priv->reg_lock, flags);
379 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
380 
381 	/*
382 	 * Make sure the MAD registration (if supplied)
383 	 * does not overlap with any existing ones
384 	 */
385 	if (mad_reg_req) {
386 		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
387 		if (!is_vendor_class(mgmt_class)) {
388 			class = port_priv->version[mad_reg_req->
389 						   mgmt_class_version].class;
390 			if (class) {
391 				method = class->method_table[mgmt_class];
392 				if (method) {
393 					if (method_in_use(&method,
394 							   mad_reg_req))
395 						goto error4;
396 				}
397 			}
398 			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
399 						  mgmt_class);
400 		} else {
401 			/* "New" vendor class range */
402 			vendor = port_priv->version[mad_reg_req->
403 						    mgmt_class_version].vendor;
404 			if (vendor) {
405 				vclass = vendor_class_index(mgmt_class);
406 				vendor_class = vendor->vendor_class[vclass];
407 				if (vendor_class) {
408 					if (is_vendor_method_in_use(
409 							vendor_class,
410 							mad_reg_req))
411 						goto error4;
412 				}
413 			}
414 			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
415 		}
416 		if (ret2) {
417 			ret = ERR_PTR(ret2);
418 			goto error4;
419 		}
420 	}
421 
422 	/* Add mad agent into port's agent list */
423 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
424 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
425 
426 	return &mad_agent_priv->agent;
427 
428 error4:
429 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
430 	kfree(reg_req);
431 error3:
432 	ib_dereg_mr(mad_agent_priv->agent.mr);
433 error2:
434 	kfree(mad_agent_priv);
435 error1:
436 	return ret;
437 }
438 EXPORT_SYMBOL(ib_register_mad_agent);
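
/*
 * Editorial example: a hedged sketch of a typical GSI registration for the
 * SA class (class version 2), roughly what an SA query client would do.
 * Handler names, "device", "port_num" and "my_context" are hypothetical.
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      IB_MGMT_RMPP_VERSION, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */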
439 
440 static inline int is_snooping_sends(int mad_snoop_flags)
441 {
442 	return (mad_snoop_flags &
443 		(/*IB_MAD_SNOOP_POSTED_SENDS |
444 		 IB_MAD_SNOOP_RMPP_SENDS |*/
445 		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
446 		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
447 }
448 
449 static inline int is_snooping_recvs(int mad_snoop_flags)
450 {
451 	return (mad_snoop_flags &
452 		(IB_MAD_SNOOP_RECVS /*|
453 		 IB_MAD_SNOOP_RMPP_RECVS*/));
454 }
455 
456 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
457 				struct ib_mad_snoop_private *mad_snoop_priv)
458 {
459 	struct ib_mad_snoop_private **new_snoop_table;
460 	unsigned long flags;
461 	int i;
462 
463 	spin_lock_irqsave(&qp_info->snoop_lock, flags);
464 	/* Check for empty slot in array. */
465 	for (i = 0; i < qp_info->snoop_table_size; i++)
466 		if (!qp_info->snoop_table[i])
467 			break;
468 
469 	if (i == qp_info->snoop_table_size) {
470 		/* Grow table. */
471 		new_snoop_table = krealloc(qp_info->snoop_table,
472 					   sizeof(mad_snoop_priv) *
473 					   (qp_info->snoop_table_size + 1),
474 					   GFP_ATOMIC);
475 		if (!new_snoop_table) {
476 			i = -ENOMEM;
477 			goto out;
478 		}
479 
480 		qp_info->snoop_table = new_snoop_table;
481 		qp_info->snoop_table_size++;
482 	}
483 	qp_info->snoop_table[i] = mad_snoop_priv;
484 	atomic_inc(&qp_info->snoop_count);
485 out:
486 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
487 	return i;
488 }
489 
490 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
491 					   u8 port_num,
492 					   enum ib_qp_type qp_type,
493 					   int mad_snoop_flags,
494 					   ib_mad_snoop_handler snoop_handler,
495 					   ib_mad_recv_handler recv_handler,
496 					   void *context)
497 {
498 	struct ib_mad_port_private *port_priv;
499 	struct ib_mad_agent *ret;
500 	struct ib_mad_snoop_private *mad_snoop_priv;
501 	int qpn;
502 
503 	/* Validate parameters */
504 	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
505 	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
506 		ret = ERR_PTR(-EINVAL);
507 		goto error1;
508 	}
509 	qpn = get_spl_qp_index(qp_type);
510 	if (qpn == -1) {
511 		ret = ERR_PTR(-EINVAL);
512 		goto error1;
513 	}
514 	port_priv = ib_get_mad_port(device, port_num);
515 	if (!port_priv) {
516 		ret = ERR_PTR(-ENODEV);
517 		goto error1;
518 	}
519 	/* Allocate structures */
520 	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
521 	if (!mad_snoop_priv) {
522 		ret = ERR_PTR(-ENOMEM);
523 		goto error1;
524 	}
525 
526 	/* Now, fill in the various structures */
527 	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
528 	mad_snoop_priv->agent.device = device;
529 	mad_snoop_priv->agent.recv_handler = recv_handler;
530 	mad_snoop_priv->agent.snoop_handler = snoop_handler;
531 	mad_snoop_priv->agent.context = context;
532 	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
533 	mad_snoop_priv->agent.port_num = port_num;
534 	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
535 	init_completion(&mad_snoop_priv->comp);
536 	mad_snoop_priv->snoop_index = register_snoop_agent(
537 						&port_priv->qp_info[qpn],
538 						mad_snoop_priv);
539 	if (mad_snoop_priv->snoop_index < 0) {
540 		ret = ERR_PTR(mad_snoop_priv->snoop_index);
541 		goto error2;
542 	}
543 
544 	atomic_set(&mad_snoop_priv->refcount, 1);
545 	return &mad_snoop_priv->agent;
546 
547 error2:
548 	kfree(mad_snoop_priv);
549 error1:
550 	return ret;
551 }
552 EXPORT_SYMBOL(ib_register_mad_snoop);
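
/*
 * Editorial example: a hedged sketch of attaching a snoop agent that
 * observes receives and send completions on the GSI QP; handler names and
 * the surrounding variables are hypothetical.
 *
 *	struct ib_mad_agent *snoop;
 *
 *	snoop = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      my_snoop_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(snoop))
 *		return PTR_ERR(snoop);
 */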
553 
554 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
555 {
556 	if (atomic_dec_and_test(&mad_agent_priv->refcount))
557 		complete(&mad_agent_priv->comp);
558 }
559 
560 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
561 {
562 	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
563 		complete(&mad_snoop_priv->comp);
564 }
565 
566 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
567 {
568 	struct ib_mad_port_private *port_priv;
569 	unsigned long flags;
570 
571 	/* Note that we could still be handling received MADs */
572 
573 	/*
574 	 * Canceling all sends results in dropping received response
575 	 * MADs, preventing us from queuing additional work
576 	 */
577 	cancel_mads(mad_agent_priv);
578 	port_priv = mad_agent_priv->qp_info->port_priv;
579 	cancel_delayed_work(&mad_agent_priv->timed_work);
580 
581 	spin_lock_irqsave(&port_priv->reg_lock, flags);
582 	remove_mad_reg_req(mad_agent_priv);
583 	list_del(&mad_agent_priv->agent_list);
584 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
585 
586 	flush_workqueue(port_priv->wq);
587 	ib_cancel_rmpp_recvs(mad_agent_priv);
588 
589 	deref_mad_agent(mad_agent_priv);
590 	wait_for_completion(&mad_agent_priv->comp);
591 
592 	kfree(mad_agent_priv->reg_req);
593 	ib_dereg_mr(mad_agent_priv->agent.mr);
594 	kfree(mad_agent_priv);
595 }
596 
597 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
598 {
599 	struct ib_mad_qp_info *qp_info;
600 	unsigned long flags;
601 
602 	qp_info = mad_snoop_priv->qp_info;
603 	spin_lock_irqsave(&qp_info->snoop_lock, flags);
604 	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
605 	atomic_dec(&qp_info->snoop_count);
606 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
607 
608 	deref_snoop_agent(mad_snoop_priv);
609 	wait_for_completion(&mad_snoop_priv->comp);
610 
611 	kfree(mad_snoop_priv);
612 }
613 
614 /*
615  * ib_unregister_mad_agent - Unregisters a client from using MAD services
616  */
617 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
618 {
619 	struct ib_mad_agent_private *mad_agent_priv;
620 	struct ib_mad_snoop_private *mad_snoop_priv;
621 
622 	/* If the TID is zero, the agent can only snoop. */
623 	if (mad_agent->hi_tid) {
624 		mad_agent_priv = container_of(mad_agent,
625 					      struct ib_mad_agent_private,
626 					      agent);
627 		unregister_mad_agent(mad_agent_priv);
628 	} else {
629 		mad_snoop_priv = container_of(mad_agent,
630 					      struct ib_mad_snoop_private,
631 					      agent);
632 		unregister_mad_snoop(mad_snoop_priv);
633 	}
634 	return 0;
635 }
636 EXPORT_SYMBOL(ib_unregister_mad_agent);
637 
638 static void dequeue_mad(struct ib_mad_list_head *mad_list)
639 {
640 	struct ib_mad_queue *mad_queue;
641 	unsigned long flags;
642 
643 	BUG_ON(!mad_list->mad_queue);
644 	mad_queue = mad_list->mad_queue;
645 	spin_lock_irqsave(&mad_queue->lock, flags);
646 	list_del(&mad_list->list);
647 	mad_queue->count--;
648 	spin_unlock_irqrestore(&mad_queue->lock, flags);
649 }
650 
651 static void snoop_send(struct ib_mad_qp_info *qp_info,
652 		       struct ib_mad_send_buf *send_buf,
653 		       struct ib_mad_send_wc *mad_send_wc,
654 		       int mad_snoop_flags)
655 {
656 	struct ib_mad_snoop_private *mad_snoop_priv;
657 	unsigned long flags;
658 	int i;
659 
660 	spin_lock_irqsave(&qp_info->snoop_lock, flags);
661 	for (i = 0; i < qp_info->snoop_table_size; i++) {
662 		mad_snoop_priv = qp_info->snoop_table[i];
663 		if (!mad_snoop_priv ||
664 		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
665 			continue;
666 
667 		atomic_inc(&mad_snoop_priv->refcount);
668 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
669 		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
670 						    send_buf, mad_send_wc);
671 		deref_snoop_agent(mad_snoop_priv);
672 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
673 	}
674 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
675 }
676 
677 static void snoop_recv(struct ib_mad_qp_info *qp_info,
678 		       struct ib_mad_recv_wc *mad_recv_wc,
679 		       int mad_snoop_flags)
680 {
681 	struct ib_mad_snoop_private *mad_snoop_priv;
682 	unsigned long flags;
683 	int i;
684 
685 	spin_lock_irqsave(&qp_info->snoop_lock, flags);
686 	for (i = 0; i < qp_info->snoop_table_size; i++) {
687 		mad_snoop_priv = qp_info->snoop_table[i];
688 		if (!mad_snoop_priv ||
689 		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
690 			continue;
691 
692 		atomic_inc(&mad_snoop_priv->refcount);
693 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
694 		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
695 						   mad_recv_wc);
696 		deref_snoop_agent(mad_snoop_priv);
697 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
698 	}
699 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
700 }
701 
702 static void build_smp_wc(struct ib_qp *qp,
703 			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
704 			 struct ib_wc *wc)
705 {
706 	memset(wc, 0, sizeof *wc);
707 	wc->wr_id = wr_id;
708 	wc->status = IB_WC_SUCCESS;
709 	wc->opcode = IB_WC_RECV;
710 	wc->pkey_index = pkey_index;
711 	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
712 	wc->src_qp = IB_QP0;
713 	wc->qp = qp;
714 	wc->slid = slid;
715 	wc->sl = 0;
716 	wc->dlid_path_bits = 0;
717 	wc->port_num = port_num;
718 }
719 
720 static size_t mad_priv_size(const struct ib_mad_private *mp)
721 {
722 	return sizeof(struct ib_mad_private) + mp->mad_size;
723 }
724 
725 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
726 {
727 	size_t size = sizeof(struct ib_mad_private) + mad_size;
728 	struct ib_mad_private *ret = kzalloc(size, flags);
729 
730 	if (ret)
731 		ret->mad_size = mad_size;
732 
733 	return ret;
734 }
735 
736 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
737 {
738 	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
739 }
740 
741 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
742 {
743 	return sizeof(struct ib_grh) + mp->mad_size;
744 }
745 
746 /*
747  * Return 0 if SMP is to be sent
748  * Return 1 if SMP was consumed locally (whether or not solicited)
749  * Return < 0 if error
750  */
751 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
752 				  struct ib_mad_send_wr_private *mad_send_wr)
753 {
754 	int ret = 0;
755 	struct ib_smp *smp = mad_send_wr->send_buf.mad;
756 	struct opa_smp *opa_smp = (struct opa_smp *)smp;
757 	unsigned long flags;
758 	struct ib_mad_local_private *local;
759 	struct ib_mad_private *mad_priv;
760 	struct ib_mad_port_private *port_priv;
761 	struct ib_mad_agent_private *recv_mad_agent = NULL;
762 	struct ib_device *device = mad_agent_priv->agent.device;
763 	u8 port_num;
764 	struct ib_wc mad_wc;
765 	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
766 	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
767 	u16 out_mad_pkey_index = 0;
768 	u16 drslid;
769 	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
770 				    mad_agent_priv->qp_info->port_priv->port_num);
771 
772 	if (rdma_cap_ib_switch(device) &&
773 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
774 		port_num = send_wr->wr.ud.port_num;
775 	else
776 		port_num = mad_agent_priv->agent.port_num;
777 
778 	/*
779 	 * Directed route handling starts if the initial LID routed part of
780 	 * a request or the ending LID routed part of a response is empty.
781 	 * If we are at the start of the LID routed part, don't update the
782 	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
783 	 */
784 	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
785 		u32 opa_drslid;
786 
787 		if ((opa_get_smp_direction(opa_smp)
788 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
789 		     OPA_LID_PERMISSIVE &&
790 		     opa_smi_handle_dr_smp_send(opa_smp,
791 						rdma_cap_ib_switch(device),
792 						port_num) == IB_SMI_DISCARD) {
793 			ret = -EINVAL;
794 			dev_err(&device->dev, "OPA Invalid directed route\n");
795 			goto out;
796 		}
797 		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
798 		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
799 		    opa_drslid & 0xffff0000) {
800 			ret = -EINVAL;
801 			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
802 			       opa_drslid);
803 			goto out;
804 		}
805 		drslid = (u16)(opa_drslid & 0x0000ffff);
806 
807 		/* Decide whether to post the send on the QP or process it locally */
808 		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
809 		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
810 			goto out;
811 	} else {
812 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
813 		     IB_LID_PERMISSIVE &&
814 		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
815 		     IB_SMI_DISCARD) {
816 			ret = -EINVAL;
817 			dev_err(&device->dev, "Invalid directed route\n");
818 			goto out;
819 		}
820 		drslid = be16_to_cpu(smp->dr_slid);
821 
822 		/* Decide whether to post the send on the QP or process it locally */
823 		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
824 		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
825 			goto out;
826 	}
827 
828 	local = kmalloc(sizeof *local, GFP_ATOMIC);
829 	if (!local) {
830 		ret = -ENOMEM;
831 		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
832 		goto out;
833 	}
834 	local->mad_priv = NULL;
835 	local->recv_mad_agent = NULL;
836 	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
837 	if (!mad_priv) {
838 		ret = -ENOMEM;
839 		dev_err(&device->dev, "No memory for local response MAD\n");
840 		kfree(local);
841 		goto out;
842 	}
843 
844 	build_smp_wc(mad_agent_priv->agent.qp,
845 		     send_wr->wr_id, drslid,
846 		     send_wr->wr.ud.pkey_index,
847 		     send_wr->wr.ud.port_num, &mad_wc);
848 
849 	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
850 		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
851 					+ mad_send_wr->send_buf.data_len
852 					+ sizeof(struct ib_grh);
853 	}
854 
855 	/* No GRH for DR SMP */
856 	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
857 				  (const struct ib_mad_hdr *)smp, mad_size,
858 				  (struct ib_mad_hdr *)mad_priv->mad,
859 				  &mad_size, &out_mad_pkey_index);
860 	switch (ret)
861 	{
862 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
863 		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
864 		    mad_agent_priv->agent.recv_handler) {
865 			local->mad_priv = mad_priv;
866 			local->recv_mad_agent = mad_agent_priv;
867 			/*
868 			 * Reference MAD agent until receive
869 			 * side of local completion handled
870 			 */
871 			atomic_inc(&mad_agent_priv->refcount);
872 		} else
873 			kfree(mad_priv);
874 		break;
875 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
876 		kfree(mad_priv);
877 		break;
878 	case IB_MAD_RESULT_SUCCESS:
879 		/* Treat like an incoming receive MAD */
880 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
881 					    mad_agent_priv->agent.port_num);
882 		if (port_priv) {
883 			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
884 			recv_mad_agent = find_mad_agent(port_priv,
885 						        (const struct ib_mad_hdr *)mad_priv->mad);
886 		}
887 		if (!port_priv || !recv_mad_agent) {
888 			/*
889 			 * No receiving agent so drop packet and
890 			 * generate send completion.
891 			 */
892 			kfree(mad_priv);
893 			break;
894 		}
895 		local->mad_priv = mad_priv;
896 		local->recv_mad_agent = recv_mad_agent;
897 		break;
898 	default:
899 		kfree(mad_priv);
900 		kfree(local);
901 		ret = -EINVAL;
902 		goto out;
903 	}
904 
905 	local->mad_send_wr = mad_send_wr;
906 	if (opa) {
907 		local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
908 		local->return_wc_byte_len = mad_size;
909 	}
910 	/* Reference MAD agent until send side of local completion handled */
911 	atomic_inc(&mad_agent_priv->refcount);
912 	/* Queue local completion to local list */
913 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
914 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
915 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
916 	queue_work(mad_agent_priv->qp_info->port_priv->wq,
917 		   &mad_agent_priv->local_work);
918 	ret = 1;
919 out:
920 	return ret;
921 }
922 
923 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
924 {
925 	int seg_size, pad;
926 
927 	seg_size = mad_size - hdr_len;
928 	if (data_len && seg_size) {
929 		pad = seg_size - data_len % seg_size;
930 		return pad == seg_size ? 0 : pad;
931 	} else
932 		return seg_size;
933 }
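
/*
 * Editorial worked example: for an IB MAD (mad_size 256) with hdr_len 24,
 * seg_size is 232.  A data_len of 500 gives pad = 232 - (500 % 232) = 196,
 * so 500 + 196 bytes fill exactly three 232-byte RMPP segments; a data_len
 * that is already a multiple of seg_size needs no padding.
 */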
934 
935 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
936 {
937 	struct ib_rmpp_segment *s, *t;
938 
939 	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
940 		list_del(&s->list);
941 		kfree(s);
942 	}
943 }
944 
945 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
946 				size_t mad_size, gfp_t gfp_mask)
947 {
948 	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
949 	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
950 	struct ib_rmpp_segment *seg = NULL;
951 	int left, seg_size, pad;
952 
953 	send_buf->seg_size = mad_size - send_buf->hdr_len;
954 	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
955 	seg_size = send_buf->seg_size;
956 	pad = send_wr->pad;
957 
958 	/* Allocate data segments. */
959 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
960 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
961 		if (!seg) {
962 			dev_err(&send_buf->mad_agent->device->dev,
963 				"alloc_send_rmpp_list: RMPP mem alloc failed for len %zd, gfp %#x\n",
964 				sizeof (*seg) + seg_size, gfp_mask);
965 			free_send_rmpp_list(send_wr);
966 			return -ENOMEM;
967 		}
968 		seg->num = ++send_buf->seg_count;
969 		list_add_tail(&seg->list, &send_wr->rmpp_list);
970 	}
971 
972 	/* Zero any padding */
973 	if (pad)
974 		memset(seg->data + seg_size - pad, 0, pad);
975 
976 	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
977 					  agent.rmpp_version;
978 	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
979 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
980 
981 	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
982 					struct ib_rmpp_segment, list);
983 	send_wr->last_ack_seg = send_wr->cur_seg;
984 	return 0;
985 }
986 
987 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
988 {
989 	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
990 }
991 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
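
/*
 * Editorial note: a nonzero rmpp_version without IB_MAD_USER_RMPP means the
 * kernel runs the RMPP protocol on the agent's behalf; an agent that sets
 * IB_MAD_USER_RMPP in its registration flags handles segmentation itself.
 */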
992 
993 struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
994 					    u32 remote_qpn, u16 pkey_index,
995 					    int rmpp_active,
996 					    int hdr_len, int data_len,
997 					    gfp_t gfp_mask,
998 					    u8 base_version)
999 {
1000 	struct ib_mad_agent_private *mad_agent_priv;
1001 	struct ib_mad_send_wr_private *mad_send_wr;
1002 	int pad, message_size, ret, size;
1003 	void *buf;
1004 	size_t mad_size;
1005 	bool opa;
1006 
1007 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1008 				      agent);
1009 
1010 	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1011 
1012 	if (opa && base_version == OPA_MGMT_BASE_VERSION)
1013 		mad_size = sizeof(struct opa_mad);
1014 	else
1015 		mad_size = sizeof(struct ib_mad);
1016 
1017 	pad = get_pad_size(hdr_len, data_len, mad_size);
1018 	message_size = hdr_len + data_len + pad;
1019 
1020 	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1021 		if (!rmpp_active && message_size > mad_size)
1022 			return ERR_PTR(-EINVAL);
1023 	} else
1024 		if (rmpp_active || message_size > mad_size)
1025 			return ERR_PTR(-EINVAL);
1026 
1027 	size = rmpp_active ? hdr_len : mad_size;
1028 	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1029 	if (!buf)
1030 		return ERR_PTR(-ENOMEM);
1031 
1032 	mad_send_wr = buf + size;
1033 	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1034 	mad_send_wr->send_buf.mad = buf;
1035 	mad_send_wr->send_buf.hdr_len = hdr_len;
1036 	mad_send_wr->send_buf.data_len = data_len;
1037 	mad_send_wr->pad = pad;
1038 
1039 	mad_send_wr->mad_agent_priv = mad_agent_priv;
1040 	mad_send_wr->sg_list[0].length = hdr_len;
1041 	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
1042 
1043 	/* OPA MADs don't have to be the full 2048 bytes */
1044 	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1045 	    data_len < mad_size - hdr_len)
1046 		mad_send_wr->sg_list[1].length = data_len;
1047 	else
1048 		mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1049 
1050 	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
1051 
1052 	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
1053 	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
1054 	mad_send_wr->send_wr.num_sge = 2;
1055 	mad_send_wr->send_wr.opcode = IB_WR_SEND;
1056 	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
1057 	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
1058 	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
1059 	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
1060 
1061 	if (rmpp_active) {
1062 		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1063 		if (ret) {
1064 			kfree(buf);
1065 			return ERR_PTR(ret);
1066 		}
1067 	}
1068 
1069 	mad_send_wr->send_buf.mad_agent = mad_agent;
1070 	atomic_inc(&mad_agent_priv->refcount);
1071 	return &mad_send_wr->send_buf;
1072 }
1073 EXPORT_SYMBOL(ib_create_send_mad);
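
/*
 * Editorial example: a hedged sketch of allocating a single (non-RMPP) SA
 * send buffer for an agent registered as in the example above; "agent",
 * "remote_qpn" and "pkey_index" are the caller's.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_SA_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_SA_HDR,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 */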
1074 
1075 int ib_get_mad_data_offset(u8 mgmt_class)
1076 {
1077 	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1078 		return IB_MGMT_SA_HDR;
1079 	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1080 		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1081 		 (mgmt_class == IB_MGMT_CLASS_BIS))
1082 		return IB_MGMT_DEVICE_HDR;
1083 	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1084 		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1085 		return IB_MGMT_VENDOR_HDR;
1086 	else
1087 		return IB_MGMT_MAD_HDR;
1088 }
1089 EXPORT_SYMBOL(ib_get_mad_data_offset);
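
/*
 * Editorial note: e.g. ib_get_mad_data_offset(IB_MGMT_CLASS_SUBN_ADM)
 * returns IB_MGMT_SA_HDR, so an SA MAD's payload begins IB_MGMT_SA_HDR
 * bytes into the buffer; together with ib_is_mad_class_rmpp() below a
 * caller learns both where data starts and whether it may span segments.
 */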
1090 
1091 int ib_is_mad_class_rmpp(u8 mgmt_class)
1092 {
1093 	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1094 	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1095 	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1096 	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
1097 	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1098 	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1099 		return 1;
1100 	return 0;
1101 }
1102 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1103 
1104 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1105 {
1106 	struct ib_mad_send_wr_private *mad_send_wr;
1107 	struct list_head *list;
1108 
1109 	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1110 				   send_buf);
1111 	list = &mad_send_wr->cur_seg->list;
1112 
1113 	if (mad_send_wr->cur_seg->num < seg_num) {
1114 		list_for_each_entry(mad_send_wr->cur_seg, list, list)
1115 			if (mad_send_wr->cur_seg->num == seg_num)
1116 				break;
1117 	} else if (mad_send_wr->cur_seg->num > seg_num) {
1118 		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1119 			if (mad_send_wr->cur_seg->num == seg_num)
1120 				break;
1121 	}
1122 	return mad_send_wr->cur_seg->data;
1123 }
1124 EXPORT_SYMBOL(ib_get_rmpp_segment);
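
/*
 * Editorial example: a hedged sketch of visiting every segment of an RMPP
 * send buffer; segment numbers are 1-based, as assigned in
 * alloc_send_rmpp_list() above.  example_consume() is hypothetical.
 *
 *	int i;
 *
 *	for (i = 1; i <= send_buf->seg_count; i++)
 *		example_consume(ib_get_rmpp_segment(send_buf, i),
 *				send_buf->seg_size);
 */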
1125 
1126 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1127 {
1128 	if (mad_send_wr->send_buf.seg_count)
1129 		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1130 					   mad_send_wr->seg_num);
1131 	else
1132 		return mad_send_wr->send_buf.mad +
1133 		       mad_send_wr->send_buf.hdr_len;
1134 }
1135 
1136 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1137 {
1138 	struct ib_mad_agent_private *mad_agent_priv;
1139 	struct ib_mad_send_wr_private *mad_send_wr;
1140 
1141 	mad_agent_priv = container_of(send_buf->mad_agent,
1142 				      struct ib_mad_agent_private, agent);
1143 	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1144 				   send_buf);
1145 
1146 	free_send_rmpp_list(mad_send_wr);
1147 	kfree(send_buf->mad);
1148 	deref_mad_agent(mad_agent_priv);
1149 }
1150 EXPORT_SYMBOL(ib_free_send_mad);
1151 
1152 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1153 {
1154 	struct ib_mad_qp_info *qp_info;
1155 	struct list_head *list;
1156 	struct ib_send_wr *bad_send_wr;
1157 	struct ib_mad_agent *mad_agent;
1158 	struct ib_sge *sge;
1159 	unsigned long flags;
1160 	int ret;
1161 
1162 	/* Set WR ID to find mad_send_wr upon completion */
1163 	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1164 	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1165 	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1166 
1167 	mad_agent = mad_send_wr->send_buf.mad_agent;
1168 	sge = mad_send_wr->sg_list;
1169 	sge[0].addr = ib_dma_map_single(mad_agent->device,
1170 					mad_send_wr->send_buf.mad,
1171 					sge[0].length,
1172 					DMA_TO_DEVICE);
1173 	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1174 		return -ENOMEM;
1175 
1176 	mad_send_wr->header_mapping = sge[0].addr;
1177 
1178 	sge[1].addr = ib_dma_map_single(mad_agent->device,
1179 					ib_get_payload(mad_send_wr),
1180 					sge[1].length,
1181 					DMA_TO_DEVICE);
1182 	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1183 		ib_dma_unmap_single(mad_agent->device,
1184 				    mad_send_wr->header_mapping,
1185 				    sge[0].length, DMA_TO_DEVICE);
1186 		return -ENOMEM;
1187 	}
1188 	mad_send_wr->payload_mapping = sge[1].addr;
1189 
1190 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1191 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1192 		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1193 				   &bad_send_wr);
1194 		list = &qp_info->send_queue.list;
1195 	} else {
1196 		ret = 0;
1197 		list = &qp_info->overflow_list;
1198 	}
1199 
1200 	if (!ret) {
1201 		qp_info->send_queue.count++;
1202 		list_add_tail(&mad_send_wr->mad_list.list, list);
1203 	}
1204 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1205 	if (ret) {
1206 		ib_dma_unmap_single(mad_agent->device,
1207 				    mad_send_wr->header_mapping,
1208 				    sge[0].length, DMA_TO_DEVICE);
1209 		ib_dma_unmap_single(mad_agent->device,
1210 				    mad_send_wr->payload_mapping,
1211 				    sge[1].length, DMA_TO_DEVICE);
1212 	}
1213 	return ret;
1214 }
1215 
1216 /*
1217  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1218  *  with the registered client
1219  */
1220 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1221 		     struct ib_mad_send_buf **bad_send_buf)
1222 {
1223 	struct ib_mad_agent_private *mad_agent_priv;
1224 	struct ib_mad_send_buf *next_send_buf;
1225 	struct ib_mad_send_wr_private *mad_send_wr;
1226 	unsigned long flags;
1227 	int ret = -EINVAL;
1228 
1229 	/* Walk list of send WRs and post each on send list */
1230 	for (; send_buf; send_buf = next_send_buf) {
1231 
1232 		mad_send_wr = container_of(send_buf,
1233 					   struct ib_mad_send_wr_private,
1234 					   send_buf);
1235 		mad_agent_priv = mad_send_wr->mad_agent_priv;
1236 
1237 		if (!send_buf->mad_agent->send_handler ||
1238 		    (send_buf->timeout_ms &&
1239 		     !send_buf->mad_agent->recv_handler)) {
1240 			ret = -EINVAL;
1241 			goto error;
1242 		}
1243 
1244 		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1245 			if (mad_agent_priv->agent.rmpp_version) {
1246 				ret = -EINVAL;
1247 				goto error;
1248 			}
1249 		}
1250 
1251 		/*
1252 		 * Save pointer to next work request to post in case the
1253 		 * current one completes, and the user modifies the work
1254 		 * request associated with the completion
1255 		 */
1256 		next_send_buf = send_buf->next;
1257 		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1258 
1259 		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1260 		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1261 			ret = handle_outgoing_dr_smp(mad_agent_priv,
1262 						     mad_send_wr);
1263 			if (ret < 0)		/* error */
1264 				goto error;
1265 			else if (ret == 1)	/* locally consumed */
1266 				continue;
1267 		}
1268 
1269 		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1270 		/* Timeout will be updated after send completes */
1271 		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1272 		mad_send_wr->max_retries = send_buf->retries;
1273 		mad_send_wr->retries_left = send_buf->retries;
1274 		send_buf->retries = 0;
1275 		/* Reference for work request to QP + response */
1276 		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1277 		mad_send_wr->status = IB_WC_SUCCESS;
1278 
1279 		/* Reference MAD agent until send completes */
1280 		atomic_inc(&mad_agent_priv->refcount);
1281 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1282 		list_add_tail(&mad_send_wr->agent_list,
1283 			      &mad_agent_priv->send_list);
1284 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1285 
1286 		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1287 			ret = ib_send_rmpp_mad(mad_send_wr);
1288 			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1289 				ret = ib_send_mad(mad_send_wr);
1290 		} else
1291 			ret = ib_send_mad(mad_send_wr);
1292 		if (ret < 0) {
1293 			/* Fail send request */
1294 			spin_lock_irqsave(&mad_agent_priv->lock, flags);
1295 			list_del(&mad_send_wr->agent_list);
1296 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1297 			atomic_dec(&mad_agent_priv->refcount);
1298 			goto error;
1299 		}
1300 	}
1301 	return 0;
1302 error:
1303 	if (bad_send_buf)
1304 		*bad_send_buf = send_buf;
1305 	return ret;
1306 }
1307 EXPORT_SYMBOL(ib_post_send_mad);
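
/*
 * Editorial example: a hedged sketch of posting the buffer built above and
 * releasing it on completion; only the ib_* calls are from this API, the
 * rest is the caller's.
 *
 *	msg->ah = ah;	// address handle from ib_create_ah()
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 *
 * and, in the agent's send handler, release the buffer when it completes:
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		ib_free_send_mad(mad_send_wc->send_buf);
 *	}
 */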
1308 
1309 /*
1310  * ib_free_recv_mad - Returns the data buffers used to receive
1311  *  a MAD back to the access layer
1312  */
1313 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1314 {
1315 	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1316 	struct ib_mad_private_header *mad_priv_hdr;
1317 	struct ib_mad_private *priv;
1318 	struct list_head free_list;
1319 
1320 	INIT_LIST_HEAD(&free_list);
1321 	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1322 
1323 	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1324 					&free_list, list) {
1325 		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1326 					   recv_buf);
1327 		mad_priv_hdr = container_of(mad_recv_wc,
1328 					    struct ib_mad_private_header,
1329 					    recv_wc);
1330 		priv = container_of(mad_priv_hdr, struct ib_mad_private,
1331 				    header);
1332 		kfree(priv);
1333 	}
1334 }
1335 EXPORT_SYMBOL(ib_free_recv_mad);
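
/*
 * Editorial example: a hedged sketch of a receive handler returning its
 * buffers when done; at this revision the handler receives only the agent
 * and the receive work completion.  example_process() is hypothetical.
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		example_process(mad_recv_wc->recv_buf.mad);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */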
1336 
1337 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1338 					u8 rmpp_version,
1339 					ib_mad_send_handler send_handler,
1340 					ib_mad_recv_handler recv_handler,
1341 					void *context)
1342 {
1343 	return ERR_PTR(-EINVAL);	/* XXX: for now */
1344 }
1345 EXPORT_SYMBOL(ib_redirect_mad_qp);
1346 
1347 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1348 		      struct ib_wc *wc)
1349 {
1350 	dev_err(&mad_agent->device->dev,
1351 		"ib_process_mad_wc() not implemented yet\n");
1352 	return 0;
1353 }
1354 EXPORT_SYMBOL(ib_process_mad_wc);
1355 
1356 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1357 			 struct ib_mad_reg_req *mad_reg_req)
1358 {
1359 	int i;
1360 
1361 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1362 		if ((*method)->agent[i]) {
1363 			pr_err("Method %d already in use\n", i);
1364 			return -EINVAL;
1365 		}
1366 	}
1367 	return 0;
1368 }
1369 
1370 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1371 {
1372 	/* Allocate management method table */
1373 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
1374 	if (!*method) {
1375 		pr_err("No memory for ib_mad_mgmt_method_table\n");
1376 		return -ENOMEM;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 /*
1383  * Check to see if there are any methods still in use
1384  */
1385 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1386 {
1387 	int i;
1388 
1389 	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1390 		if (method->agent[i])
1391 			return 1;
1392 	return 0;
1393 }
1394 
1395 /*
1396  * Check to see if there are any method tables for this class still in use
1397  */
1398 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1399 {
1400 	int i;
1401 
1402 	for (i = 0; i < MAX_MGMT_CLASS; i++)
1403 		if (class->method_table[i])
1404 			return 1;
1405 	return 0;
1406 }
1407 
1408 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1409 {
1410 	int i;
1411 
1412 	for (i = 0; i < MAX_MGMT_OUI; i++)
1413 		if (vendor_class->method_table[i])
1414 			return 1;
1415 	return 0;
1416 }
1417 
1418 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1419 			   const char *oui)
1420 {
1421 	int i;
1422 
1423 	for (i = 0; i < MAX_MGMT_OUI; i++)
1424 		/* Is there a matching OUI for this vendor class? */
1425 		if (!memcmp(vendor_class->oui[i], oui, 3))
1426 			return i;
1427 
1428 	return -1;
1429 }
1430 
1431 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1432 {
1433 	int i;
1434 
1435 	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1436 		if (vendor->vendor_class[i])
1437 			return 1;
1438 
1439 	return 0;
1440 }
1441 
1442 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1443 				     struct ib_mad_agent_private *agent)
1444 {
1445 	int i;
1446 
1447 	/* Remove any methods for this mad agent */
1448 	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1449 		if (method->agent[i] == agent) {
1450 			method->agent[i] = NULL;
1451 		}
1452 	}
1453 }
1454 
1455 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1456 			      struct ib_mad_agent_private *agent_priv,
1457 			      u8 mgmt_class)
1458 {
1459 	struct ib_mad_port_private *port_priv;
1460 	struct ib_mad_mgmt_class_table **class;
1461 	struct ib_mad_mgmt_method_table **method;
1462 	int i, ret;
1463 
1464 	port_priv = agent_priv->qp_info->port_priv;
1465 	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1466 	if (!*class) {
1467 		/* Allocate management class table for "new" class version */
1468 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
1469 		if (!*class) {
1470 			dev_err(&agent_priv->agent.device->dev,
1471 				"No memory for ib_mad_mgmt_class_table\n");
1472 			ret = -ENOMEM;
1473 			goto error1;
1474 		}
1475 
1476 		/* Allocate method table for this management class */
1477 		method = &(*class)->method_table[mgmt_class];
1478 		if ((ret = allocate_method_table(method)))
1479 			goto error2;
1480 	} else {
1481 		method = &(*class)->method_table[mgmt_class];
1482 		if (!*method) {
1483 			/* Allocate method table for this management class */
1484 			if ((ret = allocate_method_table(method)))
1485 				goto error1;
1486 		}
1487 	}
1488 
1489 	/* Now, make sure methods are not already in use */
1490 	if (method_in_use(method, mad_reg_req))
1491 		goto error3;
1492 
1493 	/* Finally, add in methods being registered */
1494 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1495 		(*method)->agent[i] = agent_priv;
1496 
1497 	return 0;
1498 
1499 error3:
1500 	/* Remove any methods for this mad agent */
1501 	remove_methods_mad_agent(*method, agent_priv);
1502 	/* Now, check to see if there are any methods in use */
1503 	if (!check_method_table(*method)) {
1504 		/* If not, release management method table */
1505 		kfree(*method);
1506 		*method = NULL;
1507 	}
1508 	ret = -EINVAL;
1509 	goto error1;
1510 error2:
1511 	kfree(*class);
1512 	*class = NULL;
1513 error1:
1514 	return ret;
1515 }
1516 
1517 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1518 			   struct ib_mad_agent_private *agent_priv)
1519 {
1520 	struct ib_mad_port_private *port_priv;
1521 	struct ib_mad_mgmt_vendor_class_table **vendor_table;
1522 	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1523 	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1524 	struct ib_mad_mgmt_method_table **method;
1525 	int i, ret = -ENOMEM;
1526 	u8 vclass;
1527 
1528 	/* "New" vendor (with OUI) class */
1529 	vclass = vendor_class_index(mad_reg_req->mgmt_class);
1530 	port_priv = agent_priv->qp_info->port_priv;
1531 	vendor_table = &port_priv->version[
1532 				mad_reg_req->mgmt_class_version].vendor;
1533 	if (!*vendor_table) {
1534 		/* Allocate mgmt vendor class table for "new" class version */
1535 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1536 		if (!vendor) {
1537 			dev_err(&agent_priv->agent.device->dev,
1538 				"No memory for ib_mad_mgmt_vendor_class_table\n");
1539 			goto error1;
1540 		}
1541 
1542 		*vendor_table = vendor;
1543 	}
1544 	if (!(*vendor_table)->vendor_class[vclass]) {
1545 		/* Allocate table for this management vendor class */
1546 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1547 		if (!vendor_class) {
1548 			dev_err(&agent_priv->agent.device->dev,
1549 				"No memory for ib_mad_mgmt_vendor_class\n");
1550 			goto error2;
1551 		}
1552 
1553 		(*vendor_table)->vendor_class[vclass] = vendor_class;
1554 	}
1555 	for (i = 0; i < MAX_MGMT_OUI; i++) {
1556 		/* Is there a matching OUI for this vendor class? */
1557 		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1558 			    mad_reg_req->oui, 3)) {
1559 			method = &(*vendor_table)->vendor_class[
1560 						vclass]->method_table[i];
1561 			BUG_ON(!*method);
1562 			goto check_in_use;
1563 		}
1564 	}
1565 	for (i = 0; i < MAX_MGMT_OUI; i++) {
1566 		/* OUI slot available? */
1567 		if (!is_vendor_oui((*vendor_table)->vendor_class[
1568 				vclass]->oui[i])) {
1569 			method = &(*vendor_table)->vendor_class[
1570 				vclass]->method_table[i];
1571 			BUG_ON(*method);
1572 			/* Allocate method table for this OUI */
1573 			if ((ret = allocate_method_table(method)))
1574 				goto error3;
1575 			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1576 			       mad_reg_req->oui, 3);
1577 			goto check_in_use;
1578 		}
1579 	}
1580 	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1581 	goto error3;
1582 
1583 check_in_use:
1584 	/* Now, make sure methods are not already in use */
1585 	if (method_in_use(method, mad_reg_req))
1586 		goto error4;
1587 
1588 	/* Finally, add in methods being registered */
1589 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1590 		(*method)->agent[i] = agent_priv;
1591 
1592 	return 0;
1593 
1594 error4:
1595 	/* Remove any methods for this mad agent */
1596 	remove_methods_mad_agent(*method, agent_priv);
1597 	/* Now, check to see if there are any methods in use */
1598 	if (!check_method_table(*method)) {
1599 		/* If not, release management method table */
1600 		kfree(*method);
1601 		*method = NULL;
1602 	}
1603 	ret = -EINVAL;
1604 error3:
1605 	if (vendor_class) {
1606 		(*vendor_table)->vendor_class[vclass] = NULL;
1607 		kfree(vendor_class);
1608 	}
1609 error2:
1610 	if (vendor) {
1611 		*vendor_table = NULL;
1612 		kfree(vendor);
1613 	}
1614 error1:
1615 	return ret;
1616 }
1617 
1618 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1619 {
1620 	struct ib_mad_port_private *port_priv;
1621 	struct ib_mad_mgmt_class_table *class;
1622 	struct ib_mad_mgmt_method_table *method;
1623 	struct ib_mad_mgmt_vendor_class_table *vendor;
1624 	struct ib_mad_mgmt_vendor_class *vendor_class;
1625 	int index;
1626 	u8 mgmt_class;
1627 
1628 	/*
1629 	 * Was a MAD registration request supplied
1630 	 * with the original registration?
1631 	 */
1632 	if (!agent_priv->reg_req) {
1633 		goto out;
1634 	}
1635 
1636 	port_priv = agent_priv->qp_info->port_priv;
1637 	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1638 	class = port_priv->version[
1639 			agent_priv->reg_req->mgmt_class_version].class;
1640 	if (!class)
1641 		goto vendor_check;
1642 
1643 	method = class->method_table[mgmt_class];
1644 	if (method) {
1645 		/* Remove any methods for this mad agent */
1646 		remove_methods_mad_agent(method, agent_priv);
1647 		/* Now, check to see if there are any methods still in use */
1648 		if (!check_method_table(method)) {
1649 			/* If not, release management method table */
1650 			kfree(method);
1651 			class->method_table[mgmt_class] = NULL;
1652 			/* Any management classes left? */
1653 			if (!check_class_table(class)) {
1654 				/* If not, release management class table */
1655 				kfree(class);
1656 				port_priv->version[
1657 					agent_priv->reg_req->
1658 					mgmt_class_version].class = NULL;
1659 			}
1660 		}
1661 	}
1662 
1663 vendor_check:
1664 	if (!is_vendor_class(mgmt_class))
1665 		goto out;
1666 
1667 	/* normalize mgmt_class to vendor range 2 */
1668 	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1669 	vendor = port_priv->version[
1670 			agent_priv->reg_req->mgmt_class_version].vendor;
1671 
1672 	if (!vendor)
1673 		goto out;
1674 
1675 	vendor_class = vendor->vendor_class[mgmt_class];
1676 	if (vendor_class) {
1677 		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1678 		if (index < 0)
1679 			goto out;
1680 		method = vendor_class->method_table[index];
1681 		if (method) {
1682 			/* Remove any methods for this mad agent */
1683 			remove_methods_mad_agent(method, agent_priv);
1684 			/*
1685 			 * Now, check to see if there are
1686 			 * any methods still in use
1687 			 */
1688 			if (!check_method_table(method)) {
1689 				/* If not, release management method table */
1690 				kfree(method);
1691 				vendor_class->method_table[index] = NULL;
1692 				memset(vendor_class->oui[index], 0, 3);
1693 				/* Any OUIs left? */
1694 				if (!check_vendor_class(vendor_class)) {
1695 					/* If not, release vendor class table */
1696 					kfree(vendor_class);
1697 					vendor->vendor_class[mgmt_class] = NULL;
1698 					/* Any other vendor classes left? */
1699 					if (!check_vendor_table(vendor)) {
1700 						kfree(vendor);
1701 						port_priv->version[
1702 							agent_priv->reg_req->
1703 							mgmt_class_version].
1704 							vendor = NULL;
1705 					}
1706 				}
1707 			}
1708 		}
1709 	}
1710 
1711 out:
1712 	return;
1713 }
1714 
1715 static struct ib_mad_agent_private *
1716 find_mad_agent(struct ib_mad_port_private *port_priv,
1717 	       const struct ib_mad_hdr *mad_hdr)
1718 {
1719 	struct ib_mad_agent_private *mad_agent = NULL;
1720 	unsigned long flags;
1721 
1722 	spin_lock_irqsave(&port_priv->reg_lock, flags);
1723 	if (ib_response_mad(mad_hdr)) {
1724 		u32 hi_tid;
1725 		struct ib_mad_agent_private *entry;
1726 
1727 		/*
1728 		 * Routing is based on the high 32 bits of the
1729 		 * MAD's transaction ID.
1730 		 */
1731 		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1732 		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1733 			if (entry->agent.hi_tid == hi_tid) {
1734 				mad_agent = entry;
1735 				break;
1736 			}
1737 		}
1738 	} else {
1739 		struct ib_mad_mgmt_class_table *class;
1740 		struct ib_mad_mgmt_method_table *method;
1741 		struct ib_mad_mgmt_vendor_class_table *vendor;
1742 		struct ib_mad_mgmt_vendor_class *vendor_class;
1743 		const struct ib_vendor_mad *vendor_mad;
1744 		int index;
1745 
1746 		/*
1747 		 * Routing is based on version, class, and method.
1748 		 * For "newer" vendor MADs, it is also based on the OUI.
1749 		 */
1750 		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1751 			goto out;
1752 		if (!is_vendor_class(mad_hdr->mgmt_class)) {
1753 			class = port_priv->version[
1754 					mad_hdr->class_version].class;
1755 			if (!class)
1756 				goto out;
1757 			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1758 			    IB_MGMT_MAX_METHODS)
1759 				goto out;
1760 			method = class->method_table[convert_mgmt_class(
1761 							mad_hdr->mgmt_class)];
1762 			if (method)
1763 				mad_agent = method->agent[mad_hdr->method &
1764 							  ~IB_MGMT_METHOD_RESP];
1765 		} else {
1766 			vendor = port_priv->version[
1767 					mad_hdr->class_version].vendor;
1768 			if (!vendor)
1769 				goto out;
1770 			vendor_class = vendor->vendor_class[vendor_class_index(
1771 						mad_hdr->mgmt_class)];
1772 			if (!vendor_class)
1773 				goto out;
1774 			/* Find matching OUI */
1775 			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1776 			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1777 			if (index == -1)
1778 				goto out;
1779 			method = vendor_class->method_table[index];
1780 			if (method) {
1781 				mad_agent = method->agent[mad_hdr->method &
1782 							  ~IB_MGMT_METHOD_RESP];
1783 			}
1784 		}
1785 	}
1786 
1787 	if (mad_agent) {
1788 		if (mad_agent->agent.recv_handler)
1789 			atomic_inc(&mad_agent->refcount);
1790 		else {
1791 			dev_notice(&port_priv->device->dev,
1792 				   "No receive handler for client %p on port %d\n",
1793 				   &mad_agent->agent, port_priv->port_num);
1794 			mad_agent = NULL;
1795 		}
1796 	}
1797 out:
1798 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1799 
1800 	return mad_agent;
1801 }
1802 
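/*
 * Sanity-check a received MAD: the base version must be one we
 * understand (IB, or OPA when the port supports it), SMI classes are
 * accepted only on QP0, and all other classes only on QP1.
 */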
1803 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1804 			const struct ib_mad_qp_info *qp_info,
1805 			bool opa)
1806 {
1807 	int valid = 0;
1808 	u32 qp_num = qp_info->qp->qp_num;
1809 
1810 	/* Make sure MAD base version is understood */
1811 	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1812 	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1813 		pr_err("MAD received with unsupported base version %d %s\n",
1814 		       mad_hdr->base_version, opa ? "(opa)" : "");
1815 		goto out;
1816 	}
1817 
1818 	/* Filter SMI packets sent to other than QP0 */
1819 	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1820 	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1821 		if (qp_num == 0)
1822 			valid = 1;
1823 	} else {
1824 		/* Filter GSI packets sent to QP0 */
1825 		if (qp_num != 0)
1826 			valid = 1;
1827 	}
1828 
1829 out:
1830 	return valid;
1831 }
1832 
1833 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1834 			    const struct ib_mad_hdr *mad_hdr)
1835 {
1836 	struct ib_rmpp_mad *rmpp_mad;
1837 
1838 	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1839 	return !mad_agent_priv->agent.rmpp_version ||
1840 		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1841 		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1842 				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
1843 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1844 }
1845 
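/*
 * Helpers for matching a received MAD against an outstanding send: the
 * management class must agree, and for request/response pairs the
 * source of one side must correspond to the destination of the other
 * (by path bits, LID, or GID, depending on whether a GRH is present).
 */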
1846 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1847 				     const struct ib_mad_recv_wc *rwc)
1848 {
1849 	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1850 		rwc->recv_buf.mad->mad_hdr.mgmt_class;
1851 }
1852 
1853 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1854 				   const struct ib_mad_send_wr_private *wr,
1855 				   const struct ib_mad_recv_wc *rwc)
1856 {
1857 	struct ib_ah_attr attr;
1858 	u8 send_resp, rcv_resp;
1859 	union ib_gid sgid;
1860 	struct ib_device *device = mad_agent_priv->agent.device;
1861 	u8 port_num = mad_agent_priv->agent.port_num;
1862 	u8 lmc;
1863 
1864 	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1865 	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1866 
1867 	if (send_resp == rcv_resp)
1868 		/* both requests or both responses; treat as not matching */
1869 		return 0;
1870 
1871 	if (ib_query_ah(wr->send_buf.ah, &attr))
1872 		/* Assume not equal, to avoid false positives. */
1873 		return 0;
1874 
1875 	if (!!(attr.ah_flags & IB_AH_GRH) !=
1876 	    !!(rwc->wc->wc_flags & IB_WC_GRH))
1877 		/* one has GID, other does not.  Assume different */
1878 		return 0;
1879 
1880 	if (!send_resp && rcv_resp) {
1881 		/* sent a request, received a response */
1882 		if (!(attr.ah_flags & IB_AH_GRH)) {
1883 			if (ib_get_cached_lmc(device, port_num, &lmc))
1884 				return 0;
1885 			return (!lmc || !((attr.src_path_bits ^
1886 					   rwc->wc->dlid_path_bits) &
1887 					  ((1 << lmc) - 1)));
1888 		} else {
1889 			if (ib_get_cached_gid(device, port_num,
1890 					      attr.grh.sgid_index, &sgid))
1891 				return 0;
1892 			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1893 				       16);
1894 		}
1895 	}
1896 
1897 	if (!(attr.ah_flags & IB_AH_GRH))
1898 		return attr.dlid == rwc->wc->slid;
1899 	else
1900 		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1901 			       16);
1902 }
1903 
1904 static inline int is_direct(u8 class)
1905 {
1906 	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1907 }
1908 
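/*
 * Find the send WR that a received response completes.  The wait list
 * is searched first; the send list is also checked since a response can
 * arrive before the send completion has been processed.  Callers hold
 * mad_agent_priv->lock.  Returns NULL if no match is found or the
 * matching request was canceled.
 */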
1909 struct ib_mad_send_wr_private*
1910 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1911 		 const struct ib_mad_recv_wc *wc)
1912 {
1913 	struct ib_mad_send_wr_private *wr;
1914 	const struct ib_mad_hdr *mad_hdr;
1915 
1916 	mad_hdr = &wc->recv_buf.mad->mad_hdr;
1917 
1918 	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1919 		if ((wr->tid == mad_hdr->tid) &&
1920 		    rcv_has_same_class(wr, wc) &&
1921 		    /*
1922 		     * Don't check GID for direct routed MADs.
1923 		     * These might have permissive LIDs.
1924 		     */
1925 		    (is_direct(mad_hdr->mgmt_class) ||
1926 		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1927 			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1928 	}
1929 
1930 	/*
1931 	 * It's possible to receive the response before we've
1932 	 * been notified that the send has completed
1933 	 */
1934 	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1935 		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1936 		    wr->tid == mad_hdr->tid &&
1937 		    wr->timeout &&
1938 		    rcv_has_same_class(wr, wc) &&
1939 		    /*
1940 		     * Don't check GID for direct routed MADs.
1941 		     * These might have permissive LIDs.
1942 		     */
1943 		    (is_direct(mad_hdr->mgmt_class) ||
1944 		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1945 			/* Verify request has not been canceled */
1946 			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1947 	}
1948 	return NULL;
1949 }
1950 
1951 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1952 {
1953 	mad_send_wr->timeout = 0;
1954 	if (mad_send_wr->refcount == 1)
1955 		list_move_tail(&mad_send_wr->agent_list,
1956 			      &mad_send_wr->mad_agent_priv->done_list);
1957 }
1958 
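/*
 * Deliver a fully received MAD to its agent.  Kernel RMPP agents get
 * reassembly via ib_process_rmpp_recv_wc() first; responses are paired
 * with their request, and the defined behavior is to complete the
 * response before completing the request's send.
 */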
1959 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1960 				 struct ib_mad_recv_wc *mad_recv_wc)
1961 {
1962 	struct ib_mad_send_wr_private *mad_send_wr;
1963 	struct ib_mad_send_wc mad_send_wc;
1964 	unsigned long flags;
1965 
1966 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1967 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1968 	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1969 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1970 						      mad_recv_wc);
1971 		if (!mad_recv_wc) {
1972 			deref_mad_agent(mad_agent_priv);
1973 			return;
1974 		}
1975 	}
1976 
1977 	/* Complete corresponding request */
1978 	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1979 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1980 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1981 		if (!mad_send_wr) {
1982 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1983 			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1984 			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1985 			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1986 					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
1987 				/* user RMPP is in effect
1988 				 * and this is an active RMPP MAD
1989 				 */
1990 				mad_recv_wc->wc->wr_id = 0;
1991 				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1992 								   mad_recv_wc);
1993 				atomic_dec(&mad_agent_priv->refcount);
1994 			} else {
1995 				/* not user RMPP; revert to normal behavior and
1996 				 * drop the MAD */
1997 				ib_free_recv_mad(mad_recv_wc);
1998 				deref_mad_agent(mad_agent_priv);
1999 				return;
2000 			}
2001 		} else {
2002 			ib_mark_mad_done(mad_send_wr);
2003 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2004 
2005 			/* Defined behavior is to complete response before request */
2006 			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
2007 			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2008 							   mad_recv_wc);
2009 			atomic_dec(&mad_agent_priv->refcount);
2010 
2011 			mad_send_wc.status = IB_WC_SUCCESS;
2012 			mad_send_wc.vendor_err = 0;
2013 			mad_send_wc.send_buf = &mad_send_wr->send_buf;
2014 			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2015 		}
2016 	} else {
2017 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2018 						   mad_recv_wc);
2019 		deref_mad_agent(mad_agent_priv);
2020 	}
2021 }
2022 
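/*
 * Directed-route SMP handling for IB ports: validate the hop fields,
 * then either consume the SMP locally or, on a switch, forward it out
 * the port indicated by the DR headers via agent_send_response().
 */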
2023 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2024 				     const struct ib_mad_qp_info *qp_info,
2025 				     const struct ib_wc *wc,
2026 				     int port_num,
2027 				     struct ib_mad_private *recv,
2028 				     struct ib_mad_private *response)
2029 {
2030 	enum smi_forward_action retsmi;
2031 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
2032 
2033 	if (smi_handle_dr_smp_recv(smp,
2034 				   rdma_cap_ib_switch(port_priv->device),
2035 				   port_num,
2036 				   port_priv->device->phys_port_cnt) ==
2037 				   IB_SMI_DISCARD)
2038 		return IB_SMI_DISCARD;
2039 
2040 	retsmi = smi_check_forward_dr_smp(smp);
2041 	if (retsmi == IB_SMI_LOCAL)
2042 		return IB_SMI_HANDLE;
2043 
2044 	if (retsmi == IB_SMI_SEND) { /* don't forward */
2045 		if (smi_handle_dr_smp_send(smp,
2046 					   rdma_cap_ib_switch(port_priv->device),
2047 					   port_num) == IB_SMI_DISCARD)
2048 			return IB_SMI_DISCARD;
2049 
2050 		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2051 			return IB_SMI_DISCARD;
2052 	} else if (rdma_cap_ib_switch(port_priv->device)) {
2053 		/* forward case for switches */
2054 		memcpy(response, recv, mad_priv_size(response));
2055 		response->header.recv_wc.wc = &response->header.wc;
2056 		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2057 		response->header.recv_wc.recv_buf.grh = &response->grh;
2058 
2059 		agent_send_response((const struct ib_mad_hdr *)response->mad,
2060 				    &response->grh, wc,
2061 				    port_priv->device,
2062 				    smi_get_fwd_port(smp),
2063 				    qp_info->qp->qp_num,
2064 				    response->mad_size,
2065 				    false);
2066 
2067 		return IB_SMI_DISCARD;
2068 	}
2069 	return IB_SMI_HANDLE;
2070 }
2071 
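/*
 * If no agent claimed a Get/Set request, synthesize a GetResp carrying
 * an "unsupported method/attribute" status so the requester is not left
 * to time out.  Requests with other methods are silently dropped.
 */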
2072 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2073 				    struct ib_mad_private *response,
2074 				    size_t *resp_len, bool opa)
2075 {
2076 	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2077 	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2078 
2079 	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2080 	    recv_hdr->method == IB_MGMT_METHOD_SET) {
2081 		memcpy(response, recv, mad_priv_size(response));
2082 		response->header.recv_wc.wc = &response->header.wc;
2083 		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2084 		response->header.recv_wc.recv_buf.grh = &response->grh;
2085 		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2086 		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2087 		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2088 			resp_hdr->status |= IB_SMP_DIRECTION;
2089 
2090 		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2091 			if (recv_hdr->mgmt_class ==
2092 			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2093 			    recv_hdr->mgmt_class ==
2094 			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2095 				*resp_len = opa_get_smp_header_size(
2096 							(struct opa_smp *)recv->mad);
2097 			else
2098 				*resp_len = sizeof(struct ib_mad_hdr);
2099 		}
2100 
2101 		return true;
2102 	} else {
2103 		return false;
2104 	}
2105 }
2106 
2107 static enum smi_action
2108 handle_opa_smi(struct ib_mad_port_private *port_priv,
2109 	       struct ib_mad_qp_info *qp_info,
2110 	       struct ib_wc *wc,
2111 	       int port_num,
2112 	       struct ib_mad_private *recv,
2113 	       struct ib_mad_private *response)
2114 {
2115 	enum smi_forward_action retsmi;
2116 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
2117 
2118 	if (opa_smi_handle_dr_smp_recv(smp,
2119 				   rdma_cap_ib_switch(port_priv->device),
2120 				   port_num,
2121 				   port_priv->device->phys_port_cnt) ==
2122 				   IB_SMI_DISCARD)
2123 		return IB_SMI_DISCARD;
2124 
2125 	retsmi = opa_smi_check_forward_dr_smp(smp);
2126 	if (retsmi == IB_SMI_LOCAL)
2127 		return IB_SMI_HANDLE;
2128 
2129 	if (retsmi == IB_SMI_SEND) { /* don't forward */
2130 		if (opa_smi_handle_dr_smp_send(smp,
2131 					   rdma_cap_ib_switch(port_priv->device),
2132 					   port_num) == IB_SMI_DISCARD)
2133 			return IB_SMI_DISCARD;
2134 
2135 		if (opa_smi_check_local_smp(smp, port_priv->device) ==
2136 		    IB_SMI_DISCARD)
2137 			return IB_SMI_DISCARD;
2138 
2139 	} else if (rdma_cap_ib_switch(port_priv->device)) {
2140 		/* forward case for switches */
2141 		memcpy(response, recv, mad_priv_size(response));
2142 		response->header.recv_wc.wc = &response->header.wc;
2143 		response->header.recv_wc.recv_buf.opa_mad =
2144 				(struct opa_mad *)response->mad;
2145 		response->header.recv_wc.recv_buf.grh = &response->grh;
2146 
2147 		agent_send_response((const struct ib_mad_hdr *)response->mad,
2148 				    &response->grh, wc,
2149 				    port_priv->device,
2150 				    opa_smi_get_fwd_port(smp),
2151 				    qp_info->qp->qp_num,
2152 				    recv->header.wc.byte_len,
2153 				    true);
2154 
2155 		return IB_SMI_DISCARD;
2156 	}
2157 
2158 	return IB_SMI_HANDLE;
2159 }
2160 
2161 static enum smi_action
2162 handle_smi(struct ib_mad_port_private *port_priv,
2163 	   struct ib_mad_qp_info *qp_info,
2164 	   struct ib_wc *wc,
2165 	   int port_num,
2166 	   struct ib_mad_private *recv,
2167 	   struct ib_mad_private *response,
2168 	   bool opa)
2169 {
2170 	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2171 
2172 	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2173 	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2174 		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2175 				      response);
2176 
2177 	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2178 }
2179 
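/*
 * Receive completion path: unmap the buffer, build the MAD receive work
 * completion, validate the MAD, run SMI processing for directed-route
 * SMPs, give the driver's process_mad() right of first refusal, and
 * finally hand the MAD to the matching agent (or generate an unmatched
 * response).  A receive WR is always reposted on the way out.
 */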
2180 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2181 				     struct ib_wc *wc)
2182 {
2183 	struct ib_mad_qp_info *qp_info;
2184 	struct ib_mad_private_header *mad_priv_hdr;
2185 	struct ib_mad_private *recv, *response = NULL;
2186 	struct ib_mad_list_head *mad_list;
2187 	struct ib_mad_agent_private *mad_agent;
2188 	int port_num;
2189 	int ret = IB_MAD_RESULT_SUCCESS;
2190 	size_t mad_size;
2191 	u16 resp_mad_pkey_index = 0;
2192 	bool opa;
2193 
2194 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2195 	qp_info = mad_list->mad_queue->qp_info;
2196 	dequeue_mad(mad_list);
2197 
2198 	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2199 			       qp_info->port_priv->port_num);
2200 
2201 	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2202 				    mad_list);
2203 	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2204 	ib_dma_unmap_single(port_priv->device,
2205 			    recv->header.mapping,
2206 			    mad_priv_dma_size(recv),
2207 			    DMA_FROM_DEVICE);
2208 
2209 	/* Set up the MAD receive work completion from the "normal" work completion */
2210 	recv->header.wc = *wc;
2211 	recv->header.recv_wc.wc = &recv->header.wc;
2212 
2213 	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2214 		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2215 		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2216 	} else {
2217 		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2218 		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2219 	}
2220 
2221 	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2222 	recv->header.recv_wc.recv_buf.grh = &recv->grh;
2223 
2224 	if (atomic_read(&qp_info->snoop_count))
2225 		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2226 
2227 	/* Validate MAD */
2228 	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2229 		goto out;
2230 
2231 	mad_size = recv->mad_size;
2232 	response = alloc_mad_private(mad_size, GFP_KERNEL);
2233 	if (!response) {
2234 		dev_err(&port_priv->device->dev,
2235 			"ib_mad_recv_done_handler: no memory for response buffer\n");
2236 		goto out;
2237 	}
2238 
2239 	if (rdma_cap_ib_switch(port_priv->device))
2240 		port_num = wc->port_num;
2241 	else
2242 		port_num = port_priv->port_num;
2243 
2244 	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2245 	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2246 		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2247 			       response, opa)
2248 		    == IB_SMI_DISCARD)
2249 			goto out;
2250 	}
2251 
2252 	/* Give driver "right of first refusal" on incoming MAD */
2253 	if (port_priv->device->process_mad) {
2254 		ret = port_priv->device->process_mad(port_priv->device, 0,
2255 						     port_priv->port_num,
2256 						     wc, &recv->grh,
2257 						     (const struct ib_mad_hdr *)recv->mad,
2258 						     recv->mad_size,
2259 						     (struct ib_mad_hdr *)response->mad,
2260 						     &mad_size, &resp_mad_pkey_index);
2261 
2262 		if (opa)
2263 			wc->pkey_index = resp_mad_pkey_index;
2264 
2265 		if (ret & IB_MAD_RESULT_SUCCESS) {
2266 			if (ret & IB_MAD_RESULT_CONSUMED)
2267 				goto out;
2268 			if (ret & IB_MAD_RESULT_REPLY) {
2269 				agent_send_response((const struct ib_mad_hdr *)response->mad,
2270 						    &recv->grh, wc,
2271 						    port_priv->device,
2272 						    port_num,
2273 						    qp_info->qp->qp_num,
2274 						    mad_size, opa);
2275 				goto out;
2276 			}
2277 		}
2278 	}
2279 
2280 	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2281 	if (mad_agent) {
2282 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2283 		/*
2284 		 * recv is consumed by ib_mad_complete_recv(): it is freed
2285 		 * on error paths or released via the agent's recv_handler
2286 		 */
2287 		recv = NULL;
2288 	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2289 		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
2290 		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2291 				    port_priv->device, port_num,
2292 				    qp_info->qp->qp_num, mad_size, opa);
2293 	}
2294 
2295 out:
2296 	/* Post another receive request for this QP */
2297 	if (response) {
2298 		ib_mad_post_receive_mads(qp_info, response);
2299 		kfree(recv);
2300 	} else
2301 		ib_mad_post_receive_mads(qp_info, recv);
2302 }
2303 
2304 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2305 {
2306 	struct ib_mad_send_wr_private *mad_send_wr;
2307 	unsigned long delay;
2308 
2309 	if (list_empty(&mad_agent_priv->wait_list)) {
2310 		cancel_delayed_work(&mad_agent_priv->timed_work);
2311 	} else {
2312 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2313 					 struct ib_mad_send_wr_private,
2314 					 agent_list);
2315 
2316 		if (time_after(mad_agent_priv->timeout,
2317 			       mad_send_wr->timeout)) {
2318 			mad_agent_priv->timeout = mad_send_wr->timeout;
2319 			delay = mad_send_wr->timeout - jiffies;
2320 			if ((long)delay <= 0)
2321 				delay = 1;
2322 			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2323 					 &mad_agent_priv->timed_work, delay);
2324 		}
2325 	}
2326 }
2327 
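/*
 * Move a send onto the wait list, which is kept sorted by absolute
 * timeout (earliest at the head).  If this entry becomes the soonest to
 * expire, the delayed timeout work is rescheduled accordingly.
 */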
2328 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2329 {
2330 	struct ib_mad_agent_private *mad_agent_priv;
2331 	struct ib_mad_send_wr_private *temp_mad_send_wr;
2332 	struct list_head *list_item;
2333 	unsigned long delay;
2334 
2335 	mad_agent_priv = mad_send_wr->mad_agent_priv;
2336 	list_del(&mad_send_wr->agent_list);
2337 
2338 	delay = mad_send_wr->timeout;
2339 	mad_send_wr->timeout += jiffies;
2340 
2341 	if (delay) {
2342 		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2343 			temp_mad_send_wr = list_entry(list_item,
2344 						struct ib_mad_send_wr_private,
2345 						agent_list);
2346 			if (time_after(mad_send_wr->timeout,
2347 				       temp_mad_send_wr->timeout))
2348 				break;
2349 		}
2350 	} else {
2351 		list_item = &mad_agent_priv->wait_list;
2352 	}
2353 	list_add(&mad_send_wr->agent_list, list_item);
2354 
2355 	/* Reschedule a work item if we have a shorter timeout */
2356 	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2357 		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2358 				 &mad_agent_priv->timed_work, delay);
2359 }
2360 
2361 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2362 			  int timeout_ms)
2363 {
2364 	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2365 	wait_for_response(mad_send_wr);
2366 }
2367 
2368 /*
2369  * Process a send work completion
2370  */
2371 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2372 			     struct ib_mad_send_wc *mad_send_wc)
2373 {
2374 	struct ib_mad_agent_private	*mad_agent_priv;
2375 	unsigned long			flags;
2376 	int				ret;
2377 
2378 	mad_agent_priv = mad_send_wr->mad_agent_priv;
2379 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2380 	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2381 		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2382 		if (ret == IB_RMPP_RESULT_CONSUMED)
2383 			goto done;
2384 	} else
2385 		ret = IB_RMPP_RESULT_UNHANDLED;
2386 
2387 	if (mad_send_wc->status != IB_WC_SUCCESS &&
2388 	    mad_send_wr->status == IB_WC_SUCCESS) {
2389 		mad_send_wr->status = mad_send_wc->status;
2390 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2391 	}
2392 
2393 	if (--mad_send_wr->refcount > 0) {
2394 		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2395 		    mad_send_wr->status == IB_WC_SUCCESS) {
2396 			wait_for_response(mad_send_wr);
2397 		}
2398 		goto done;
2399 	}
2400 
2401 	/* Remove send from MAD agent and notify client of completion */
2402 	list_del(&mad_send_wr->agent_list);
2403 	adjust_timeout(mad_agent_priv);
2404 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2405 
2406 	if (mad_send_wr->status != IB_WC_SUCCESS)
2407 		mad_send_wc->status = mad_send_wr->status;
2408 	if (ret == IB_RMPP_RESULT_INTERNAL)
2409 		ib_rmpp_send_handler(mad_send_wc);
2410 	else
2411 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2412 						   mad_send_wc);
2413 
2414 	/* Release reference on agent taken when sending */
2415 	deref_mad_agent(mad_agent_priv);
2416 	return;
2417 done:
2418 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2419 }
2420 
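/*
 * Send completion path: unmap the header and payload buffers, pull the
 * next overflow send (if any) onto the hardware send queue, complete
 * the finished WR, and post the queued send - retrying the unmap and
 * complete cycle if that post fails.
 */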
2421 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2422 				     struct ib_wc *wc)
2423 {
2424 	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
2425 	struct ib_mad_list_head		*mad_list;
2426 	struct ib_mad_qp_info		*qp_info;
2427 	struct ib_mad_queue		*send_queue;
2428 	struct ib_send_wr		*bad_send_wr;
2429 	struct ib_mad_send_wc		mad_send_wc;
2430 	unsigned long flags;
2431 	int ret;
2432 
2433 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2434 	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2435 				   mad_list);
2436 	send_queue = mad_list->mad_queue;
2437 	qp_info = send_queue->qp_info;
2438 
2439 retry:
2440 	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2441 			    mad_send_wr->header_mapping,
2442 			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2443 	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2444 			    mad_send_wr->payload_mapping,
2445 			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2446 	queued_send_wr = NULL;
2447 	spin_lock_irqsave(&send_queue->lock, flags);
2448 	list_del(&mad_list->list);
2449 
2450 	/* Move queued send to the send queue */
2451 	if (send_queue->count-- > send_queue->max_active) {
2452 		mad_list = container_of(qp_info->overflow_list.next,
2453 					struct ib_mad_list_head, list);
2454 		queued_send_wr = container_of(mad_list,
2455 					struct ib_mad_send_wr_private,
2456 					mad_list);
2457 		list_move_tail(&mad_list->list, &send_queue->list);
2458 	}
2459 	spin_unlock_irqrestore(&send_queue->lock, flags);
2460 
2461 	mad_send_wc.send_buf = &mad_send_wr->send_buf;
2462 	mad_send_wc.status = wc->status;
2463 	mad_send_wc.vendor_err = wc->vendor_err;
2464 	if (atomic_read(&qp_info->snoop_count))
2465 		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2466 			   IB_MAD_SNOOP_SEND_COMPLETIONS);
2467 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2468 
2469 	if (queued_send_wr) {
2470 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2471 				   &bad_send_wr);
2472 		if (ret) {
2473 			dev_err(&port_priv->device->dev,
2474 				"ib_post_send failed: %d\n", ret);
2475 			mad_send_wr = queued_send_wr;
2476 			wc->status = IB_WC_LOC_QP_OP_ERR;
2477 			goto retry;
2478 		}
2479 	}
2480 }
2481 
2482 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2483 {
2484 	struct ib_mad_send_wr_private *mad_send_wr;
2485 	struct ib_mad_list_head *mad_list;
2486 	unsigned long flags;
2487 
2488 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2489 	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2490 		mad_send_wr = container_of(mad_list,
2491 					   struct ib_mad_send_wr_private,
2492 					   mad_list);
2493 		mad_send_wr->retry = 1;
2494 	}
2495 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2496 }
2497 
2498 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2499 			      struct ib_wc *wc)
2500 {
2501 	struct ib_mad_list_head *mad_list;
2502 	struct ib_mad_qp_info *qp_info;
2503 	struct ib_mad_send_wr_private *mad_send_wr;
2504 	int ret;
2505 
2506 	/* Determine if failure was a send or receive */
2507 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2508 	qp_info = mad_list->mad_queue->qp_info;
2509 	if (mad_list->mad_queue == &qp_info->recv_queue)
2510 		/*
2511 		 * Receive errors indicate that the QP has entered the error
2512 		 * state - error handling/shutdown code will cleanup
2513 		 */
2514 		return;
2515 
2516 	/*
2517 	 * Send errors will transition the QP to SQE - move
2518 	 * QP to RTS and repost flushed work requests
2519 	 */
2520 	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2521 				   mad_list);
2522 	if (wc->status == IB_WC_WR_FLUSH_ERR) {
2523 		if (mad_send_wr->retry) {
2524 			/* Repost send */
2525 			struct ib_send_wr *bad_send_wr;
2526 
2527 			mad_send_wr->retry = 0;
2528 			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2529 					&bad_send_wr);
2530 			if (ret)
2531 				ib_mad_send_done_handler(port_priv, wc);
2532 		} else
2533 			ib_mad_send_done_handler(port_priv, wc);
2534 	} else {
2535 		struct ib_qp_attr *attr;
2536 
2537 		/* Transition QP to RTS and fail offending send */
2538 		attr = kmalloc(sizeof *attr, GFP_KERNEL);
2539 		if (attr) {
2540 			attr->qp_state = IB_QPS_RTS;
2541 			attr->cur_qp_state = IB_QPS_SQE;
2542 			ret = ib_modify_qp(qp_info->qp, attr,
2543 					   IB_QP_STATE | IB_QP_CUR_STATE);
2544 			kfree(attr);
2545 			if (ret)
2546 				dev_err(&port_priv->device->dev,
2547 					"mad_error_handler - ib_modify_qp to RTS: %d\n",
2548 					ret);
2549 			else
2550 				mark_sends_for_retry(qp_info);
2551 		}
2552 		ib_mad_send_done_handler(port_priv, wc);
2553 	}
2554 }
2555 
2556 /*
2557  * IB MAD completion callback
2558  */
2559 static void ib_mad_completion_handler(struct work_struct *work)
2560 {
2561 	struct ib_mad_port_private *port_priv;
2562 	struct ib_wc wc;
2563 
2564 	port_priv = container_of(work, struct ib_mad_port_private, work);
2565 	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2566 
2567 	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2568 		if (wc.status == IB_WC_SUCCESS) {
2569 			switch (wc.opcode) {
2570 			case IB_WC_SEND:
2571 				ib_mad_send_done_handler(port_priv, &wc);
2572 				break;
2573 			case IB_WC_RECV:
2574 				ib_mad_recv_done_handler(port_priv, &wc);
2575 				break;
2576 			default:
2577 				BUG();
2578 				break;
2579 			}
2580 		} else
2581 			mad_error_handler(port_priv, &wc);
2582 	}
2583 }
2584 
2585 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2586 {
2587 	unsigned long flags;
2588 	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2589 	struct ib_mad_send_wc mad_send_wc;
2590 	struct list_head cancel_list;
2591 
2592 	INIT_LIST_HEAD(&cancel_list);
2593 
2594 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2595 	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2596 				 &mad_agent_priv->send_list, agent_list) {
2597 		if (mad_send_wr->status == IB_WC_SUCCESS) {
2598 			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2599 			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2600 		}
2601 	}
2602 
2603 	/* Empty wait list to prevent receives from finding a request */
2604 	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2605 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2606 
2607 	/* Report all cancelled requests */
2608 	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2609 	mad_send_wc.vendor_err = 0;
2610 
2611 	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2612 				 &cancel_list, agent_list) {
2613 		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2614 		list_del(&mad_send_wr->agent_list);
2615 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2616 						   &mad_send_wc);
2617 		atomic_dec(&mad_agent_priv->refcount);
2618 	}
2619 }
2620 
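/*
 * Look up the private send WR backing a client-visible send_buf, on
 * either the wait list or the send list (only RMPP data MADs are
 * considered on the latter).
 */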
2621 static struct ib_mad_send_wr_private*
2622 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2623 	     struct ib_mad_send_buf *send_buf)
2624 {
2625 	struct ib_mad_send_wr_private *mad_send_wr;
2626 
2627 	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2628 			    agent_list) {
2629 		if (&mad_send_wr->send_buf == send_buf)
2630 			return mad_send_wr;
2631 	}
2632 
2633 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2634 			    agent_list) {
2635 		if (is_rmpp_data_mad(mad_agent_priv,
2636 				     mad_send_wr->send_buf.mad) &&
2637 		    &mad_send_wr->send_buf == send_buf)
2638 			return mad_send_wr;
2639 	}
2640 	return NULL;
2641 }
2642 
2643 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2644 		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2645 {
2646 	struct ib_mad_agent_private *mad_agent_priv;
2647 	struct ib_mad_send_wr_private *mad_send_wr;
2648 	unsigned long flags;
2649 	int active;
2650 
2651 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2652 				      agent);
2653 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2654 	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2655 	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2656 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2657 		return -EINVAL;
2658 	}
2659 
2660 	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2661 	if (!timeout_ms) {
2662 		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2663 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2664 	}
2665 
2666 	mad_send_wr->send_buf.timeout_ms = timeout_ms;
2667 	if (active)
2668 		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2669 	else
2670 		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2671 
2672 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2673 	return 0;
2674 }
2675 EXPORT_SYMBOL(ib_modify_mad);
2676 
2677 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2678 		   struct ib_mad_send_buf *send_buf)
2679 {
2680 	ib_modify_mad(mad_agent, send_buf, 0);
2681 }
2682 EXPORT_SYMBOL(ib_cancel_mad);
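
/*
 * Usage sketch (assumes an agent from ib_register_mad_agent() and a
 * send_buf created with ib_create_send_mad() and posted with
 * ib_post_send_mad()):
 *
 *	ib_cancel_mad(agent, send_buf);
 *
 * is shorthand for ib_modify_mad(agent, send_buf, 0): the request is
 * marked IB_WC_WR_FLUSH_ERR and completed through the agent's
 * send_handler instead of waiting for a response or timeout.
 */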
2683 
2684 static void local_completions(struct work_struct *work)
2685 {
2686 	struct ib_mad_agent_private *mad_agent_priv;
2687 	struct ib_mad_local_private *local;
2688 	struct ib_mad_agent_private *recv_mad_agent;
2689 	unsigned long flags;
2690 	int free_mad;
2691 	struct ib_wc wc;
2692 	struct ib_mad_send_wc mad_send_wc;
2693 	bool opa;
2694 
2695 	mad_agent_priv =
2696 		container_of(work, struct ib_mad_agent_private, local_work);
2697 
2698 	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2699 			       mad_agent_priv->qp_info->port_priv->port_num);
2700 
2701 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2702 	while (!list_empty(&mad_agent_priv->local_list)) {
2703 		local = list_entry(mad_agent_priv->local_list.next,
2704 				   struct ib_mad_local_private,
2705 				   completion_list);
2706 		list_del(&local->completion_list);
2707 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2708 		free_mad = 0;
2709 		if (local->mad_priv) {
2710 			u8 base_version;
2711 			recv_mad_agent = local->recv_mad_agent;
2712 			if (!recv_mad_agent) {
2713 				dev_err(&mad_agent_priv->agent.device->dev,
2714 					"No receive MAD agent for local completion\n");
2715 				free_mad = 1;
2716 				goto local_send_completion;
2717 			}
2718 
2719 			/*
2720 			 * Defined behavior is to complete response
2721 			 * before request
2722 			 */
2723 			build_smp_wc(recv_mad_agent->agent.qp,
2724 				     (unsigned long) local->mad_send_wr,
2725 				     be16_to_cpu(IB_LID_PERMISSIVE),
2726 				     local->mad_send_wr->send_wr.wr.ud.pkey_index,
2727 				     recv_mad_agent->agent.port_num, &wc);
2728 
2729 			local->mad_priv->header.recv_wc.wc = &wc;
2730 
2731 			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2732 			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2733 				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2734 				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2735 			} else {
2736 				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2737 				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2738 			}
2739 
2740 			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2741 			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2742 				 &local->mad_priv->header.recv_wc.rmpp_list);
2743 			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2744 			local->mad_priv->header.recv_wc.recv_buf.mad =
2745 						(struct ib_mad *)local->mad_priv->mad;
2746 			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2747 				snoop_recv(recv_mad_agent->qp_info,
2748 					  &local->mad_priv->header.recv_wc,
2749 					   IB_MAD_SNOOP_RECVS);
2750 			recv_mad_agent->agent.recv_handler(
2751 						&recv_mad_agent->agent,
2752 						&local->mad_priv->header.recv_wc);
2753 			spin_lock_irqsave(&recv_mad_agent->lock, flags);
2754 			atomic_dec(&recv_mad_agent->refcount);
2755 			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2756 		}
2757 
2758 local_send_completion:
2759 		/* Complete send */
2760 		mad_send_wc.status = IB_WC_SUCCESS;
2761 		mad_send_wc.vendor_err = 0;
2762 		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2763 		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2764 			snoop_send(mad_agent_priv->qp_info,
2765 				   &local->mad_send_wr->send_buf,
2766 				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2767 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2768 						   &mad_send_wc);
2769 
2770 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2771 		atomic_dec(&mad_agent_priv->refcount);
2772 		if (free_mad)
2773 			kfree(local->mad_priv);
2774 		kfree(local);
2775 	}
2776 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2777 }
2778 
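/*
 * Retry a timed-out send: consume one of its remaining retries, rearm
 * the timeout, and repost it (letting the RMPP layer take over for
 * kernel RMPP agents).  Returns -ETIMEDOUT when no retries remain.
 */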
2779 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2780 {
2781 	int ret;
2782 
2783 	if (!mad_send_wr->retries_left)
2784 		return -ETIMEDOUT;
2785 
2786 	mad_send_wr->retries_left--;
2787 	mad_send_wr->send_buf.retries++;
2788 
2789 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2790 
2791 	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2792 		ret = ib_retry_rmpp(mad_send_wr);
2793 		switch (ret) {
2794 		case IB_RMPP_RESULT_UNHANDLED:
2795 			ret = ib_send_mad(mad_send_wr);
2796 			break;
2797 		case IB_RMPP_RESULT_CONSUMED:
2798 			ret = 0;
2799 			break;
2800 		default:
2801 			ret = -ECOMM;
2802 			break;
2803 		}
2804 	} else
2805 		ret = ib_send_mad(mad_send_wr);
2806 
2807 	if (!ret) {
2808 		mad_send_wr->refcount++;
2809 		list_add_tail(&mad_send_wr->agent_list,
2810 			      &mad_send_wr->mad_agent_priv->send_list);
2811 	}
2812 	return ret;
2813 }
2814 
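/*
 * Delayed-work handler for request timeouts: walk the wait list, retry
 * sends whose timeout has expired, and complete the rest to the client
 * with IB_WC_RESP_TIMEOUT_ERR (or the error status already recorded).
 */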
2815 static void timeout_sends(struct work_struct *work)
2816 {
2817 	struct ib_mad_agent_private *mad_agent_priv;
2818 	struct ib_mad_send_wr_private *mad_send_wr;
2819 	struct ib_mad_send_wc mad_send_wc;
2820 	unsigned long flags, delay;
2821 
2822 	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2823 				      timed_work.work);
2824 	mad_send_wc.vendor_err = 0;
2825 
2826 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2827 	while (!list_empty(&mad_agent_priv->wait_list)) {
2828 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2829 					 struct ib_mad_send_wr_private,
2830 					 agent_list);
2831 
2832 		if (time_after(mad_send_wr->timeout, jiffies)) {
2833 			delay = mad_send_wr->timeout - jiffies;
2834 			if ((long)delay <= 0)
2835 				delay = 1;
2836 			queue_delayed_work(mad_agent_priv->qp_info->
2837 					   port_priv->wq,
2838 					   &mad_agent_priv->timed_work, delay);
2839 			break;
2840 		}
2841 
2842 		list_del(&mad_send_wr->agent_list);
2843 		if (mad_send_wr->status == IB_WC_SUCCESS &&
2844 		    !retry_send(mad_send_wr))
2845 			continue;
2846 
2847 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2848 
2849 		if (mad_send_wr->status == IB_WC_SUCCESS)
2850 			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2851 		else
2852 			mad_send_wc.status = mad_send_wr->status;
2853 		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2854 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2855 						   &mad_send_wc);
2856 
2857 		atomic_dec(&mad_agent_priv->refcount);
2858 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2859 	}
2860 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2861 }
2862 
2863 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2864 {
2865 	struct ib_mad_port_private *port_priv = cq->cq_context;
2866 	unsigned long flags;
2867 
2868 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2869 	if (!list_empty(&port_priv->port_list))
2870 		queue_work(port_priv->wq, &port_priv->work);
2871 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2872 }
2873 
2874 /*
2875  * Allocate receive MADs and post receive WRs for them
2876  */
2877 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2878 				    struct ib_mad_private *mad)
2879 {
2880 	unsigned long flags;
2881 	int post, ret;
2882 	struct ib_mad_private *mad_priv;
2883 	struct ib_sge sg_list;
2884 	struct ib_recv_wr recv_wr, *bad_recv_wr;
2885 	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2886 
2887 	/* Initialize common scatter list fields */
2888 	sg_list.lkey = qp_info->port_priv->mr->lkey;
2889 
2890 	/* Initialize common receive WR fields */
2891 	recv_wr.next = NULL;
2892 	recv_wr.sg_list = &sg_list;
2893 	recv_wr.num_sge = 1;
2894 
2895 	do {
2896 		/* Allocate and map receive buffer */
2897 		if (mad) {
2898 			mad_priv = mad;
2899 			mad = NULL;
2900 		} else {
2901 			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2902 						     GFP_ATOMIC);
2903 			if (!mad_priv) {
2904 				dev_err(&qp_info->port_priv->device->dev,
2905 					"No memory for receive buffer\n");
2906 				ret = -ENOMEM;
2907 				break;
2908 			}
2909 		}
2910 		sg_list.length = mad_priv_dma_size(mad_priv);
2911 		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2912 						 &mad_priv->grh,
2913 						 mad_priv_dma_size(mad_priv),
2914 						 DMA_FROM_DEVICE);
2915 		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2916 						  sg_list.addr))) {
2917 			ret = -ENOMEM;
2918 			break;
2919 		}
2920 		mad_priv->header.mapping = sg_list.addr;
2921 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2922 		mad_priv->header.mad_list.mad_queue = recv_queue;
2923 
2924 		/* Post receive WR */
2925 		spin_lock_irqsave(&recv_queue->lock, flags);
2926 		post = (++recv_queue->count < recv_queue->max_active);
2927 		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2928 		spin_unlock_irqrestore(&recv_queue->lock, flags);
2929 		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2930 		if (ret) {
2931 			spin_lock_irqsave(&recv_queue->lock, flags);
2932 			list_del(&mad_priv->header.mad_list.list);
2933 			recv_queue->count--;
2934 			spin_unlock_irqrestore(&recv_queue->lock, flags);
2935 			ib_dma_unmap_single(qp_info->port_priv->device,
2936 					    mad_priv->header.mapping,
2937 					    mad_priv_dma_size(mad_priv),
2938 					    DMA_FROM_DEVICE);
2939 			kfree(mad_priv);
2940 			dev_err(&qp_info->port_priv->device->dev,
2941 				"ib_post_recv failed: %d\n", ret);
2942 			break;
2943 		}
2944 	} while (post);
2945 
2946 	return ret;
2947 }
2948 
2949 /*
2950  * Unmap and free all posted receive MADs
2951  */
2952 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2953 {
2954 	struct ib_mad_private_header *mad_priv_hdr;
2955 	struct ib_mad_private *recv;
2956 	struct ib_mad_list_head *mad_list;
2957 
2958 	if (!qp_info->qp)
2959 		return;
2960 
2961 	while (!list_empty(&qp_info->recv_queue.list)) {
2962 
2963 		mad_list = list_entry(qp_info->recv_queue.list.next,
2964 				      struct ib_mad_list_head, list);
2965 		mad_priv_hdr = container_of(mad_list,
2966 					    struct ib_mad_private_header,
2967 					    mad_list);
2968 		recv = container_of(mad_priv_hdr, struct ib_mad_private,
2969 				    header);
2970 
2971 		/* Remove from posted receive MAD list */
2972 		list_del(&mad_list->list);
2973 
2974 		ib_dma_unmap_single(qp_info->port_priv->device,
2975 				    recv->header.mapping,
2976 				    mad_priv_dma_size(recv),
2977 				    DMA_FROM_DEVICE);
2978 		kfree(recv);
2979 	}
2980 
2981 	qp_info->recv_queue.count = 0;
2982 }
2983 
2984 /*
2985  * Start the port
2986  */
2987 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2988 {
2989 	int ret, i;
2990 	struct ib_qp_attr *attr;
2991 	struct ib_qp *qp;
2992 	u16 pkey_index;
2993 
2994 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
2995 	if (!attr) {
2996 		dev_err(&port_priv->device->dev,
2997 			"Couldn't kmalloc ib_qp_attr\n");
2998 		return -ENOMEM;
2999 	}
3000 
3001 	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3002 			   IB_DEFAULT_PKEY_FULL, &pkey_index);
3003 	if (ret)
3004 		pkey_index = 0;
3005 
3006 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3007 		qp = port_priv->qp_info[i].qp;
3008 		if (!qp)
3009 			continue;
3010 
3011 		/*
3012 		 * PKey index for QP1 is irrelevant but
3013 		 * one is needed for the Reset to Init transition
3014 		 */
3015 		attr->qp_state = IB_QPS_INIT;
3016 		attr->pkey_index = pkey_index;
3017 		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3018 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3019 					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
3020 		if (ret) {
3021 			dev_err(&port_priv->device->dev,
3022 				"Couldn't change QP%d state to INIT: %d\n",
3023 				i, ret);
3024 			goto out;
3025 		}
3026 
3027 		attr->qp_state = IB_QPS_RTR;
3028 		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3029 		if (ret) {
3030 			dev_err(&port_priv->device->dev,
3031 				"Couldn't change QP%d state to RTR: %d\n",
3032 				i, ret);
3033 			goto out;
3034 		}
3035 
3036 		attr->qp_state = IB_QPS_RTS;
3037 		attr->sq_psn = IB_MAD_SEND_Q_PSN;
3038 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3039 		if (ret) {
3040 			dev_err(&port_priv->device->dev,
3041 				"Couldn't change QP%d state to RTS: %d\n",
3042 				i, ret);
3043 			goto out;
3044 		}
3045 	}
3046 
3047 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3048 	if (ret) {
3049 		dev_err(&port_priv->device->dev,
3050 			"Failed to request completion notification: %d\n",
3051 			ret);
3052 		goto out;
3053 	}
3054 
3055 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3056 		if (!port_priv->qp_info[i].qp)
3057 			continue;
3058 
3059 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3060 		if (ret) {
3061 			dev_err(&port_priv->device->dev,
3062 				"Couldn't post receive WRs\n");
3063 			goto out;
3064 		}
3065 	}
3066 out:
3067 	kfree(attr);
3068 	return ret;
3069 }
3070 
3071 static void qp_event_handler(struct ib_event *event, void *qp_context)
3072 {
3073 	struct ib_mad_qp_info	*qp_info = qp_context;
3074 
3075 	/* It's worse than that! He's dead, Jim! */
3076 	dev_err(&qp_info->port_priv->device->dev,
3077 		"Fatal error (%d) on MAD QP (%d)\n",
3078 		event->event, qp_info->qp->qp_num);
3079 }
3080 
3081 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3082 			   struct ib_mad_queue *mad_queue)
3083 {
3084 	mad_queue->qp_info = qp_info;
3085 	mad_queue->count = 0;
3086 	spin_lock_init(&mad_queue->lock);
3087 	INIT_LIST_HEAD(&mad_queue->list);
3088 }
3089 
3090 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3091 			struct ib_mad_qp_info *qp_info)
3092 {
3093 	qp_info->port_priv = port_priv;
3094 	init_mad_queue(qp_info, &qp_info->send_queue);
3095 	init_mad_queue(qp_info, &qp_info->recv_queue);
3096 	INIT_LIST_HEAD(&qp_info->overflow_list);
3097 	spin_lock_init(&qp_info->snoop_lock);
3098 	qp_info->snoop_table = NULL;
3099 	qp_info->snoop_table_size = 0;
3100 	atomic_set(&qp_info->snoop_count, 0);
3101 }
3102 
3103 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3104 			 enum ib_qp_type qp_type)
3105 {
3106 	struct ib_qp_init_attr	qp_init_attr;
3107 	int ret;
3108 
3109 	memset(&qp_init_attr, 0, sizeof qp_init_attr);
3110 	qp_init_attr.send_cq = qp_info->port_priv->cq;
3111 	qp_init_attr.recv_cq = qp_info->port_priv->cq;
3112 	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3113 	qp_init_attr.cap.max_send_wr = mad_sendq_size;
3114 	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3115 	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3116 	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3117 	qp_init_attr.qp_type = qp_type;
3118 	qp_init_attr.port_num = qp_info->port_priv->port_num;
3119 	qp_init_attr.qp_context = qp_info;
3120 	qp_init_attr.event_handler = qp_event_handler;
3121 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3122 	if (IS_ERR(qp_info->qp)) {
3123 		dev_err(&qp_info->port_priv->device->dev,
3124 			"Couldn't create ib_mad QP%d\n",
3125 			get_spl_qp_index(qp_type));
3126 		ret = PTR_ERR(qp_info->qp);
3127 		goto error;
3128 	}
3129 	/* Use minimum queue sizes unless the CQ is resized */
3130 	qp_info->send_queue.max_active = mad_sendq_size;
3131 	qp_info->recv_queue.max_active = mad_recvq_size;
3132 	return 0;
3133 
3134 error:
3135 	return ret;
3136 }
3137 
3138 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3139 {
3140 	if (!qp_info->qp)
3141 		return;
3142 
3143 	ib_destroy_qp(qp_info->qp);
3144 	kfree(qp_info->snoop_table);
3145 }
3146 
3147 /*
3148  * Open the port
3149  * Create the QP, PD, MR, and CQ if needed
3150  */
3151 static int ib_mad_port_open(struct ib_device *device,
3152 			    int port_num)
3153 {
3154 	int ret, cq_size;
3155 	struct ib_mad_port_private *port_priv;
3156 	unsigned long flags;
3157 	char name[sizeof "ib_mad123"];
3158 	int has_smi;
3159 	struct ib_cq_init_attr cq_attr = {};
3160 
3161 	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3162 		return -EFAULT;
3163 
3164 	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3165 		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3166 		return -EFAULT;
3167 
3168 	/* Create new device info */
3169 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3170 	if (!port_priv) {
3171 		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
3172 		return -ENOMEM;
3173 	}
3174 
3175 	port_priv->device = device;
3176 	port_priv->port_num = port_num;
3177 	spin_lock_init(&port_priv->reg_lock);
3178 	INIT_LIST_HEAD(&port_priv->agent_list);
3179 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
3180 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
3181 
3182 	cq_size = mad_sendq_size + mad_recvq_size;
3183 	has_smi = rdma_cap_ib_smi(device, port_num);
3184 	if (has_smi)
3185 		cq_size *= 2;
3186 
3187 	cq_attr.cqe = cq_size;
3188 	port_priv->cq = ib_create_cq(port_priv->device,
3189 				     ib_mad_thread_completion_handler,
3190 				     NULL, port_priv, &cq_attr);
3191 	if (IS_ERR(port_priv->cq)) {
3192 		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3193 		ret = PTR_ERR(port_priv->cq);
3194 		goto error3;
3195 	}
3196 
3197 	port_priv->pd = ib_alloc_pd(device);
3198 	if (IS_ERR(port_priv->pd)) {
3199 		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3200 		ret = PTR_ERR(port_priv->pd);
3201 		goto error4;
3202 	}
3203 
3204 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
3205 	if (IS_ERR(port_priv->mr)) {
3206 		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
3207 		ret = PTR_ERR(port_priv->mr);
3208 		goto error5;
3209 	}
3210 
3211 	if (has_smi) {
3212 		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3213 		if (ret)
3214 			goto error6;
3215 	}
3216 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3217 	if (ret)
3218 		goto error7;
3219 
3220 	snprintf(name, sizeof name, "ib_mad%d", port_num);
3221 	port_priv->wq = create_singlethread_workqueue(name);
3222 	if (!port_priv->wq) {
3223 		ret = -ENOMEM;
3224 		goto error8;
3225 	}
3226 	INIT_WORK(&port_priv->work, ib_mad_completion_handler);
3227 
3228 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3229 	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3230 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3231 
3232 	ret = ib_mad_port_start(port_priv);
3233 	if (ret) {
3234 		dev_err(&device->dev, "Couldn't start port\n");
3235 		goto error9;
3236 	}
3237 
3238 	return 0;
3239 
3240 error9:
3241 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3242 	list_del_init(&port_priv->port_list);
3243 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3244 
3245 	destroy_workqueue(port_priv->wq);
3246 error8:
3247 	destroy_mad_qp(&port_priv->qp_info[1]);
3248 error7:
3249 	destroy_mad_qp(&port_priv->qp_info[0]);
3250 error6:
3251 	ib_dereg_mr(port_priv->mr);
3252 error5:
3253 	ib_dealloc_pd(port_priv->pd);
3254 error4:
3255 	ib_destroy_cq(port_priv->cq);
3256 	cleanup_recv_queue(&port_priv->qp_info[1]);
3257 	cleanup_recv_queue(&port_priv->qp_info[0]);
3258 error3:
3259 	kfree(port_priv);
3260 
3261 	return ret;
3262 }
3263 
3264 /*
3265  * Close the port
3266  * If there are no classes using the port, free the port
3267  * resources (CQ, MR, PD, QP) and remove the port's info structure
3268  */
3269 static int ib_mad_port_close(struct ib_device *device, int port_num)
3270 {
3271 	struct ib_mad_port_private *port_priv;
3272 	unsigned long flags;
3273 
3274 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3275 	port_priv = __ib_get_mad_port(device, port_num);
3276 	if (!port_priv) {
3277 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3278 		dev_err(&device->dev, "Port %d not found\n", port_num);
3279 		return -ENODEV;
3280 	}
3281 	list_del_init(&port_priv->port_list);
3282 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3283 
3284 	destroy_workqueue(port_priv->wq);
3285 	destroy_mad_qp(&port_priv->qp_info[1]);
3286 	destroy_mad_qp(&port_priv->qp_info[0]);
3287 	ib_dereg_mr(port_priv->mr);
3288 	ib_dealloc_pd(port_priv->pd);
3289 	ib_destroy_cq(port_priv->cq);
3290 	cleanup_recv_queue(&port_priv->qp_info[1]);
3291 	cleanup_recv_queue(&port_priv->qp_info[0]);
3292 	/* XXX: Handle deallocation of MAD registration tables */
3293 
3294 	kfree(port_priv);
3295 
3296 	return 0;
3297 }
3298 
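/*
 * Client add callback: open MAD and agent services on every MAD-capable
 * port of the new device, unwinding already-opened ports on failure.
 */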
3299 static void ib_mad_init_device(struct ib_device *device)
3300 {
3301 	int start, i;
3302 
3303 	start = rdma_start_port(device);
3304 
3305 	for (i = start; i <= rdma_end_port(device); i++) {
3306 		if (!rdma_cap_ib_mad(device, i))
3307 			continue;
3308 
3309 		if (ib_mad_port_open(device, i)) {
3310 			dev_err(&device->dev, "Couldn't open port %d\n", i);
3311 			goto error;
3312 		}
3313 		if (ib_agent_port_open(device, i)) {
3314 			dev_err(&device->dev,
3315 				"Couldn't open port %d for agents\n", i);
3316 			goto error_agent;
3317 		}
3318 	}
3319 	return;
3320 
3321 error_agent:
3322 	if (ib_mad_port_close(device, i))
3323 		dev_err(&device->dev, "Couldn't close port %d\n", i);
3324 
3325 error:
3326 	while (--i >= start) {
3327 		if (!rdma_cap_ib_mad(device, i))
3328 			continue;
3329 
3330 		if (ib_agent_port_close(device, i))
3331 			dev_err(&device->dev,
3332 				"Couldn't close port %d for agents\n", i);
3333 		if (ib_mad_port_close(device, i))
3334 			dev_err(&device->dev, "Couldn't close port %d\n", i);
3335 	}
3336 }
3337 
3338 static void ib_mad_remove_device(struct ib_device *device)
3339 {
3340 	int i;
3341 
3342 	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3343 		if (!rdma_cap_ib_mad(device, i))
3344 			continue;
3345 
3346 		if (ib_agent_port_close(device, i))
3347 			dev_err(&device->dev,
3348 				"Couldn't close port %d for agents\n", i);
3349 		if (ib_mad_port_close(device, i))
3350 			dev_err(&device->dev, "Couldn't close port %d\n", i);
3351 	}
3352 }
3353 
3354 static struct ib_client mad_client = {
3355 	.name   = "mad",
3356 	.add = ib_mad_init_device,
3357 	.remove = ib_mad_remove_device
3358 };
3359 
3360 static int __init ib_mad_init_module(void)
3361 {
3362 	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3363 	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3364 
3365 	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3366 	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3367 
3368 	INIT_LIST_HEAD(&ib_mad_port_list);
3369 
3370 	if (ib_register_client(&mad_client)) {
3371 		pr_err("Couldn't register ib_mad client\n");
3372 		return -EINVAL;
3373 	}
3374 
3375 	return 0;
3376 }
3377 
3378 static void __exit ib_mad_cleanup_module(void)
3379 {
3380 	ib_unregister_client(&mad_client);
3381 }
3382 
3383 module_init(ib_mad_init_module);
3384 module_exit(ib_mad_cleanup_module);
3385