send.c: diff of commit 712cba5d87a6c0e980ee5fad45734e189c4d7151 (old) against 905dd4184e0732de41d6ee3c7b06e0cfdd9f0aad (new)
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 245 unchanged lines hidden ---

		/* Unfortunately, the way Infiniband deals with
		 * RDMA to a bad MR key is by moving the entire
		 * queue pair to error state. We could possibly
		 * recover from that, but right now we drop the
		 * connection.
		 * Therefore, we never retransmit messages with RDMA ops.
		 */
-		if (rm->rdma.op_active &&
-		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
+		if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
+		    (rm->rdma.op_active &&
+		     test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
			spin_lock_irqsave(&cp->cp_lock, flags);
			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
				list_move(&rm->m_conn_item, &to_be_dropped);
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			continue;
		}

		/* Require an ACK every once in a while */

--- 932 unchanged lines hidden ---
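
A note on the hunk above: the old code dropped a queued message from the send path only when it carried RDMA ops and had already been transmitted once (an RDMA error against a bad MR key moves the whole queue pair to the error state, so such messages are never retransmitted). The new condition additionally drops any message flagged RDS_MSG_FLUSH, whether or not it involves RDMA. Below is a minimal userspace sketch of that decision, using simplified stand-ins (MSG_FLUSH, MSG_RETRANSMITTED, struct msg, should_drop) for the kernel's flag bits, struct rds_message, and test_bit(); it is illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MSG_FLUSH          (1u << 0)	/* stand-in for RDS_MSG_FLUSH */
#define MSG_RETRANSMITTED  (1u << 1)	/* stand-in for RDS_MSG_RETRANSMITTED */

struct msg {
	unsigned int flags;		/* stand-in for rm->m_flags */
	bool rdma_op_active;		/* stand-in for rm->rdma.op_active */
};

/* Mirror of the new drop condition: flush-flagged messages are dropped
 * unconditionally; RDMA-bearing messages are dropped once they would
 * have to be retransmitted.
 */
static bool should_drop(const struct msg *m)
{
	return (m->flags & MSG_FLUSH) ||
	       (m->rdma_op_active && (m->flags & MSG_RETRANSMITTED));
}

int main(void)
{
	struct msg flushed      = { .flags = MSG_FLUSH };
	struct msg retrans_rdma = { .flags = MSG_RETRANSMITTED, .rdma_op_active = true };
	struct msg fresh_rdma   = { .rdma_op_active = true };

	/* Prints "1 1 0": only the never-yet-sent RDMA message stays queued. */
	printf("%d %d %d\n", should_drop(&flushed),
	       should_drop(&retrans_rdma), should_drop(&fresh_rdma));
	return 0;
}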

	cp->cp_next_tx_seq++;

	if (RDS_HS_PROBE(sport, dport) && cp->cp_conn->c_trans->t_mp_capable) {
		u16 npaths = RDS_MPATH_WORKERS;

		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_NPATHS, &npaths,
					  sizeof(npaths));
+		rds_message_add_extension(&rm->m_inc.i_hdr,
+					  RDS_EXTHDR_GEN_NUM,
+					  &cp->cp_conn->c_my_gen_num,
+					  sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);

--- 31 unchanged lines hidden ---
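
A note on the hunk above: when a handshake probe is sent on a multipath-capable transport, the new code advertises the sender's connection generation number via an RDS_EXTHDR_GEN_NUM extension alongside the existing RDS_EXTHDR_NPATHS (path count) extension. The sketch below only illustrates the general idea of packing two such small fields as type/length/value records into a bounded extension area; the constants, struct ext_area, and ext_add() helper are hypothetical and do not reflect the actual RDS wire format or the rds_message_add_extension() implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT_NPATHS   1	/* stand-in for RDS_EXTHDR_NPATHS */
#define EXT_GEN_NUM  2	/* stand-in for RDS_EXTHDR_GEN_NUM */

struct ext_area {
	uint8_t buf[16];	/* bounded space for extension records */
	size_t  used;
};

/* Append one type/length/value record; returns 0 on success, -1 if the
 * area is full.
 */
static int ext_add(struct ext_area *a, uint8_t type, const void *data, uint8_t len)
{
	if (a->used + 2u + len > sizeof(a->buf))
		return -1;
	a->buf[a->used++] = type;
	a->buf[a->used++] = len;
	memcpy(&a->buf[a->used], data, len);
	a->used += len;
	return 0;
}

int main(void)
{
	struct ext_area a = { .used = 0 };
	uint16_t npaths  = 8;	/* cf. RDS_MPATH_WORKERS */
	uint32_t gen_num = 42;	/* cf. cp->cp_conn->c_my_gen_num */

	ext_add(&a, EXT_NPATHS, &npaths, sizeof(npaths));
	ext_add(&a, EXT_GEN_NUM, &gen_num, sizeof(gen_num));
	printf("extension area uses %zu bytes\n", a.used);	/* 10 bytes */
	return 0;
}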