/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE,
	RMPP_STATE_CANCELING
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

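/*
 * Drop a reference on a reassembly context; the final put wakes
 * destroy_rmpp_recv(), which waits until all users are done before
 * freeing the address handle and the context itself.
 */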
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

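/*
 * Tear down all in-progress receives for an agent that is going away:
 * mark each context as canceling (freeing the receive WC for transfers
 * that never completed), cancel the delayed work items, flush the port
 * workqueue, then destroy every context.
 */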
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		rmpp_recv->state = RMPP_STATE_CANCELING;
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		destroy_rmpp_recv(rmpp_recv);
	}
}

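/*
 * Build an RMPP ACK from the received data MAD: flip the response bit
 * in the method, record the highest in-order segment as last_ack, and
 * advertise the current receive window in paylen_newwin.
 */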
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

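/*
 * Send completion handler for the ACK/ABORT MADs generated in this
 * file; destroys the address handle if this module created it (marked
 * by stashing the AH in context[0]) and frees the send buffer.
 */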
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

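/*
 * Reject a received MAD by sending an RMPP ABORT with the given status
 * back to its sender.
 */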
static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

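/*
 * The sender went quiet mid-transfer: if reassembly is still active,
 * mark it timed out, unlink it, NAK the sender with "total time too
 * long" (T2L), and release the partial receive.
 */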
static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state == RMPP_STATE_CANCELING) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

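/*
 * Allocate and initialize reassembly state for a new RMPP transfer,
 * keyed by the first segment's work completion and MAD header.
 */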
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method  = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

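/*
 * Receive window: scale with the receive queue depth (one eighth of
 * the maximum outstanding receives), but never less than one segment.
 */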
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

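/*
 * Total length of the reassembled MAD: the class header plus one full
 * data payload per segment, minus the pad the sender declared in the
 * last segment's PayloadLength field.
 */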
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

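/*
 * Final segment received: ACK it, stop the reassembly timeout, fill in
 * the total MAD length, and schedule deferred cleanup so that late
 * duplicate segments can still be re-ACKed.
 */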
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

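/*
 * Handle a middle or last segment: drop it if it falls outside the
 * window or the transfer timed out, re-ACK duplicates, otherwise link
 * it into place and either complete the transfer or grow the window.
 */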
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

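/*
 * Handle the first segment of a transfer: create reassembly state and,
 * unless this first MAD is a duplicate, either complete a single
 * segment transfer immediately or arm the timeout and ACK the segment.
 */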
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

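/*
 * Post the next DATA segment of an outgoing RMPP send, setting the
 * FIRST/LAST flags and PayloadLength where required and bounding the
 * per-segment ACK timeout at two seconds.
 */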
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

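/*
 * Abort the outstanding send matching this receive, if any, completing
 * it back to the client with IB_WC_REM_ABORT_ERR and the RMPP status
 * reported as the vendor error.
 */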
static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

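/*
 * Record a newly acknowledged segment number and advance last_ack_seg
 * to the corresponding buffer in the segment list.
 */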
static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

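/*
 * Process an incoming ACK: validate its status, segment number, and
 * window against the matching send, update last_ack/newwin, complete
 * the send when everything is acked and no response is expected, and
 * transmit further segments if the window opened.
 */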
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

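/*
 * Entry point for received MADs: pass non-RMPP MADs straight through,
 * reject unsupported RMPP versions, and dispatch DATA, ACK, STOP, and
 * ABORT types to their handlers. Returns a completed receive, or NULL
 * if the MAD was consumed.
 */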
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

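/*
 * Initial send window: if this MAD is a response, reuse the window the
 * peer advertised while receiving its request (RMPP direction switch);
 * otherwise start with a window of one segment.
 */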
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}