/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

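/*
 * Per-transaction receive state: ACTIVE while segments are still being
 * collected, TIMEOUT once the total-transaction timer expires with the
 * transfer unfinished, and COMPLETE after the segment carrying the
 * LAST flag has arrived in order.
 */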
enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	refcount_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u32 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	u8 base_version;
};

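/*
 * A mad_rmpp_recv is reference counted: deref_rmpp_recv() drops a
 * reference and signals ->comp when the count hits zero, while
 * destroy_rmpp_recv() drops the owning reference, waits for any
 * concurrent users to finish, and only then frees the structure.
 */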
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (refcount_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
	kfree(rmpp_recv);
}

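/*
 * Tear down all in-progress receives for an agent: first stop the
 * per-receive timers under the lock, then flush the port workqueue so
 * no handler is still running, and finally unlink and free each entry.
 * A receive that never reached RMPP_STATE_COMPLETE still owns its
 * partially reassembled MAD, which must be freed here.
 */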
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

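/*
 * Build an RMPP ACK from the received segment's headers.  Per the RMPP
 * protocol, SegmentNumber carries the last segment received in order,
 * and the PayloadLength/NewWindowLast field advertises how far the
 * sender may transmit before waiting for another ACK.
 */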
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

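/*
 * Send the "DS ACK" used by double-sided RMPP transfers (e.g. SA
 * GetMulti, where request and response both use RMPP): a segment
 * number of 0 with a window of 1 acknowledges the peer's final ACK,
 * signalling that it may release its transaction resources.
 */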
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		rdma_destroy_ah(mad_send_wc->send_buf->ah,
				RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

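/*
 * Total-transaction timeout: if the receive is still ACTIVE when the
 * timer fires, abandon the reassembly, NACK the sender with status T2L
 * (total time too long), and release the partial MAD.
 */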
static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	refcount_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	rmpp_recv->base_version = mad_hdr->base_version;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		refcount_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
						   struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

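/*
 * Receive window heuristic: advertise one eighth of the receive queue
 * depth, but at least one segment.  For example, a queue of 512 posted
 * receives yields a 64-segment window per ACK.
 */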
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
						 int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

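/*
 * Compute the total reassembled MAD length.  Each segment contributes
 * data_size payload bytes after the class header; the last segment's
 * PayloadLength field lets us recover the pad, so the result is
 * hdr_size + seg_num * data_size - pad.  OPA MADs use a larger segment
 * size and are handled separately from the fixed 256-byte IB MADs.
 */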
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;
	bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
				    rmpp_recv->agent->qp_info->port_priv->port_num);

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
		data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
		pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	} else {
		data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
		pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > IB_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	}

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* Defer cleanup for 10 seconds, until we can determine the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

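/*
 * Begin reassembly on the first segment of a new transfer.  If another
 * context already inserted a receive for the same transaction (i.e. a
 * retransmitted first segment), fall through to continue_rmpp() so the
 * duplicate is handled by the ordinary ACK/drop logic.
 */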
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	refcount_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* Allow 40 seconds, until we can determine the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

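/*
 * Transmit the next segment of an outgoing RMPP MAD.  PayloadLength is
 * only meaningful on the first segment (total payload length) and the
 * last segment (bytes remaining after padding); it is zero elsewhere.
 */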
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = (mad_send_wr->send_buf.seg_count *
			  mad_send_wr->send_buf.seg_rmpp_size) -
			  mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* Wait 2 seconds for an ACK, until we can determine the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

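/*
 * Handle an incoming ACK for a send in progress: validate the segment
 * number against the advertised window, slide last_ack forward, reset
 * the retry budget on forward progress, and either complete the send,
 * rearm its response timeout, or push out further segments as the new
 * window allows.
 */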
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

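/*
 * Dispatch an RMPP DATA segment: segment 1 must carry the FIRST flag
 * and starts a new reassembly; any other segment must not carry it,
 * and is merged into an existing one.  A violation is NACKed with
 * status BAD_SEG.
 */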
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

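/*
 * Pick the initial send window.  For a response MAD, look for the
 * matching request receive on this agent: if the requester already
 * reported a window via a DS ACK (repwin), start there instead of at
 * 1, saving a round trip on double-sided transfers.
 */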
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

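/*
 * On a retry, rewind the send position to the last acknowledged
 * segment so transmission resumes with the following one; the receiver
 * re-ACKs and drops any duplicates it has already accepted.
 */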
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}