xref: /openbmc/linux/drivers/infiniband/core/agent.c (revision 497677ab)
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */

#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"

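/* Protects ib_agent_port_list */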
spinlock_t ib_agent_port_list_lock;
static LIST_HEAD(ib_agent_port_list);

/*
 * Caller must hold ib_agent_port_list_lock
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
		    struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;

	BUG_ON(!(!!device ^ !!mad_agent));  /* Exactly one must be non-NULL */

	if (device) {
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if (entry->smp_agent->device == device &&
			    entry->port_num == port_num)
				return entry;
		}
	} else {
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if ((entry->smp_agent == mad_agent) ||
			    (entry->perf_mgmt_agent == mad_agent))
				return entry;
		}
	}
	return NULL;
}

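/*
 * Like __ib_get_agent_port(), but takes and releases
 * ib_agent_port_list_lock around the list search.
 */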
static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
		  struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num, mad_agent);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return entry;
}

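/*
 * Returns 1 for MADs that are not directed route SMPs and for ports
 * that are not open; otherwise defers to smi_check_local_smp() for the
 * port's SM agent.
 */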
int smi_check_local_dr_smp(struct ib_smp *smp,
			   struct ib_device *device,
			   int port_num)
{
	struct ib_agent_port_private *port_priv;

	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return 1;
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
		       "not open\n",
		       device->name, port_num);
		return 1;
	}

	return smi_check_local_smp(port_priv->smp_agent, smp);
}

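/*
 * DMA map a MAD, build an address handle from the receive completion
 * (and GRH, if present), and post the send.  On success the work
 * request is tracked on the port's posted send list.
 * Returns 0 on success, 1 on failure.
 */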
static int agent_mad_send(struct ib_mad_agent *mad_agent,
			  struct ib_agent_port_private *port_priv,
			  struct ib_mad_private *mad_priv,
			  struct ib_grh *grh,
			  struct ib_wc *wc)
{
	struct ib_agent_send_wr *agent_send_wr;
	struct ib_sge gather_list;
	struct ib_send_wr send_wr;
	struct ib_send_wr *bad_send_wr;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int ret = 1;

	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
	if (!agent_send_wr)
		goto out;
	agent_send_wr->mad = mad_priv;

	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
					  &mad_priv->mad,
					  sizeof(mad_priv->mad),
					  DMA_TO_DEVICE);
	gather_list.length = sizeof(mad_priv->mad);
	gather_list.lkey = mad_agent->mr->lkey;

	send_wr.next = NULL;
	send_wr.opcode = IB_WR_SEND;
	send_wr.sg_list = &gather_list;
	send_wr.num_sge = 1;
	send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
	send_wr.wr.ud.timeout_ms = 0;
	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

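	/* Build the reply address from the receive completion */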
	ah_attr.dlid = wc->slid;
	ah_attr.port_num = mad_agent->port_num;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.sl = wc->sl;
	ah_attr.static_rate = 0;
	ah_attr.ah_flags = 0; /* No GRH */
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		if (wc->wc_flags & IB_WC_GRH) {
			ah_attr.ah_flags = IB_AH_GRH;
			/* Should sgid be looked up? */
			ah_attr.grh.sgid_index = 0;
			ah_attr.grh.hop_limit = grh->hop_limit;
			ah_attr.grh.flow_label = be32_to_cpu(
				grh->version_tclass_flow) & 0xfffff;
			ah_attr.grh.traffic_class = (be32_to_cpu(
				grh->version_tclass_flow) >> 20) & 0xff;
			memcpy(ah_attr.grh.dgid.raw,
			       grh->sgid.raw,
			       sizeof(ah_attr.grh.dgid));
		}
	}

	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
	if (IS_ERR(agent_send_wr->ah)) {
		printk(KERN_ERR SPFX "Could not create address handle\n");
		dma_unmap_single(mad_agent->device->dma_device,
				 gather_list.addr,
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		kfree(agent_send_wr);
		goto out;
	}

	send_wr.wr.ud.ah = agent_send_wr->ah;
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		send_wr.wr.ud.pkey_index = wc->pkey_index;
		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
	} else {	/* for SMPs */
		send_wr.wr.ud.pkey_index = 0;
		send_wr.wr.ud.remote_qkey = 0;
	}
	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
	send_wr.wr_id = (unsigned long)agent_send_wr;

	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

	/* Send */
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(agent_send_wr, mapping),
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		ib_destroy_ah(agent_send_wr->ah);
		kfree(agent_send_wr);
	} else {
		list_add_tail(&agent_send_wr->send_list,
			      &port_priv->send_posted_list);
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		ret = 0;
	}

out:
	return ret;
}

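/*
 * Send a MAD on the given port: select the SM or PerfMgmt agent based
 * on the management class in the MAD header and hand the MAD to
 * agent_mad_send().  Returns 0 on success, 1 on failure.
 */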
int agent_send(struct ib_mad_private *mad,
	       struct ib_grh *grh,
	       struct ib_wc *wc,
	       struct ib_device *device,
	       int port_num)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *mad_agent;

	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
		       device->name, port_num);
		return 1;
	}

	/* Get mad agent based on mgmt_class in MAD */
	switch (mad->mad.mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		mad_agent = port_priv->smp_agent;
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		mad_agent = port_priv->perf_mgmt_agent;
		break;
	default:
		return 1;
	}

	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}

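/*
 * Send completion handler: remove the work request from the posted
 * send list, unmap the MAD buffer, destroy the address handle and free
 * the memory allocated in agent_mad_send().
 */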
static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_agent_port_private	*port_priv;
	struct ib_agent_send_wr		*agent_send_wr;
	unsigned long			flags;

	/* Find matching MAD agent */
	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
	if (!port_priv) {
		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
		       "agent %p\n", mad_agent);
		return;
	}

	agent_send_wr = (struct ib_agent_send_wr *)
			(unsigned long)mad_send_wc->wr_id;
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	/* Remove completed send from posted send MAD list */
	list_del(&agent_send_wr->send_list);
	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

	dma_unmap_single(mad_agent->device->dma_device,
			 pci_unmap_addr(agent_send_wr, mapping),
			 sizeof(agent_send_wr->mad->mad),
			 DMA_TO_DEVICE);

	ib_destroy_ah(agent_send_wr->ah);

	/* Release allocated memory */
	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
	kfree(agent_send_wr);
}

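/*
 * Register send-only MAD agents for the SM class (SMI QP) and the
 * PerfMgmt class (GSI QP) on a port and add the port to
 * ib_agent_port_list.
 */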
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	int ret;
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	/* First, check if port already open for SMI */
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (port_priv) {
		printk(KERN_DEBUG SPFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	memset(port_priv, 0, sizeof *port_priv);
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->send_list_lock);
	INIT_LIST_HEAD(&port_priv->send_posted_list);

	/* Obtain send only MAD agent for SM class (SMI QP) */
	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
						     IB_QPT_SMI,
						     NULL, 0,
						    &agent_send_handler,
						     NULL, NULL);

	if (IS_ERR(port_priv->smp_agent)) {
		ret = PTR_ERR(port_priv->smp_agent);
		goto error2;
	}

	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
							   IB_QPT_GSI,
							   NULL, 0,
							  &agent_send_handler,
							   NULL, NULL);
	if (IS_ERR(port_priv->perf_mgmt_agent)) {
		ret = PTR_ERR(port_priv->perf_mgmt_agent);
		goto error3;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

error3:
	ib_unregister_mad_agent(port_priv->smp_agent);
error2:
	kfree(port_priv);
error1:
	return ret;
}

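/*
 * Remove a port from ib_agent_port_list and unregister its SM and
 * PerfMgmt MAD agents.
 */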
int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
	ib_unregister_mad_agent(port_priv->smp_agent);
	kfree(port_priv);

	return 0;
}