/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

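/*
 * State for one RMPP receive in progress: the segments accumulated so
 * far, the reassembly window, and the fields (TID, source QP/LID,
 * class, version, method) used to match incoming segments to this
 * transfer.
 */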
struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

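/*
 * rmpp_recv contexts are reference counted; dropping the last
 * reference fires the completion that destroy_rmpp_recv() waits on
 * before freeing the context.
 */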
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

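/*
 * Tear down all receive contexts for an agent that is being
 * unregistered: stop the timeout/cleanup work items, wait for any
 * that are already running, then free every context along with the
 * receive buffers of transfers that never completed.
 */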
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

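/* Build an RMPP ACK by reflecting the received segment's headers. */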
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

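/*
 * ACK the current receive window.  Failure to allocate or post the
 * ACK is not fatal; the sender will time out and retransmit.
 */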
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

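/*
 * Allocate a single-segment response MAD addressed back to the sender
 * of recv_wc.  context[0] is set to the AH created here so that
 * ib_rmpp_send_handler() knows to destroy it on send completion.
 */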
static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

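/*
 * Reply to the receiver's final ACK of a dual-sided RMPP transaction
 * with a seg_num 0 ACK advertising a window of 1 for the response
 * direction of the transfer.
 */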
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

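/*
 * Send completion for ACKs/ABORTs generated in this file: destroy the
 * AH if we created one (flagged via context[0]) and free the buffer.
 */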
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

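/*
 * No new segment arrived before the reassembly timer expired: drop
 * the partial transfer and NACK the sender with "total time too
 * long" (T2L).
 */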
static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

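/* Initialize a reassembly context from the first segment of a transfer. */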
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

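/*
 * Receive window to grant a sender: one eighth of the receive queue
 * depth, but never less than one segment.
 */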
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

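/*
 * Search the segment list backwards for the buffer after which
 * seg_num should be inserted; returns NULL if the segment is already
 * present (a duplicate).
 */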
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

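/*
 * Total length of the reassembled MAD: the class header plus one data
 * payload per segment, minus the pad implied by the last segment's
 * PayloadLength field (ignored if out of range).
 */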
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

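/*
 * All segments have arrived: send the final ACK and report the full
 * MAD length.  The context lingers briefly so that late duplicate
 * segments can still be re-ACKed before cleanup_work releases it.
 */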
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

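/*
 * Handle a segment other than the first: insert it into the (possibly
 * out-of-order) segment list and advance the expected segment number.
 * Depending on progress this completes the transfer, grows and ACKs
 * the window, or simply waits for more segments.  Segments beyond the
 * window or for timed-out transfers are dropped; duplicates of
 * already-ACKed segments are re-ACKed.
 */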
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

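/*
 * Handle the first segment of a transfer.  A duplicate first segment
 * (a matching context already exists) is rerouted through
 * continue_rmpp(); a single-segment transfer completes immediately;
 * otherwise arm the reassembly timer, open the window, and ACK
 * segment 1.
 */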
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

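/*
 * Transmit the next unsent segment, setting the FIRST/LAST flags and
 * the PayloadLength field (total payload on the first segment,
 * residual payload on the last) as RMPP requires.
 */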
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

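/*
 * An unmatched ACK with seg_num 0 that maps to a completed receive
 * carries the peer's window for the response direction of a
 * dual-sided transfer; remember it for init_newwin().
 */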
static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

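/*
 * Process an ACK for an outbound transfer: validate seg_num against
 * the window, slide last_ack and newwin forward, complete the send
 * once the last segment is ACKed (or rearm the response timeout), and
 * otherwise keep streaming segments into the newly granted window.
 */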
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

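/*
 * Dispatch a data segment: segment 1, and only segment 1, must carry
 * the FIRST flag; violations are NACKed as bad segment numbers.
 */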
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

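/*
 * Entry point for received MADs.  Non-RMPP MADs pass through
 * untouched; data segments are fed to reassembly, which returns the
 * complete MAD once the transfer finishes; ACK/STOP/ABORT MADs are
 * consumed here and NULL is returned.
 */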
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

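/*
 * Initial send window for an outbound transfer.  For a response to a
 * request received via RMPP, the peer may already have advertised a
 * receive window (dual-sided RMPP), recorded in repwin; otherwise
 * start with a window of one segment.
 */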
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

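/*
 * Begin an outbound transfer by sending its first segment.  Sends
 * without the RMPP active flag are left to the caller; ACK/STOP/ABORT
 * MADs go out as ordinary single MADs.
 */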
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

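/*
 * Send-completion hook: once a segment is on the wire, keep sending
 * until the window is exhausted or the transfer is fully sent, then
 * wait for an ACK (or, after the final ACK, for the response).
 */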
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

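/* Retry hook: rewind to the last ACKed segment and resend from there. */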
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}