send.c: ce8ee02d519ab20c5b87d3b3929b5e44ad89e26f -> ebeeb1ad9b8adcc37c2ec21a96f39e9d35199b46
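Unified view of the two revisions: unprefixed lines are unchanged context, - marks lines removed in the newer revision, + marks lines added. The newer revision adds rds_destroy_pending() checks at every point that schedules deferred send work: rds_send_xmit() now fails with -ENETUNREACH right after taking the xmit lock and again instead of requeueing itself, rds_sendmsg() bails out early with -EAGAIN and turns a failed requeue into -ENETUNREACH, and the pong path silently skips the enqueue. Each check that guards queue_delayed_work() runs under rcu_read_lock(), which suggests the teardown path sets the pending state and uses an RCU grace period to wait out in-flight enqueuers; that side of the change is outside this file's hunks.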
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 148 unchanged lines hidden ---

	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

+	if (rds_destroy_pending(cp->cp_conn)) {
+		release_in_xmit(cp);
+		ret = -ENETUNREACH; /* don't requeue send work */
+		goto out;
+	}
+
	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */

--- 259 unchanged lines hidden ---

		smp_mb();
		raced = send_gen != READ_ONCE(cp->cp_send_gen);

		if ((test_bit(0, &conn->c_map_queued) ||
		    !list_empty(&cp->cp_send_queue)) && !raced) {
			if (batch_count < send_batch_count)
				goto restart;
-			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_lock();
+			if (rds_destroy_pending(cp->cp_conn))
+				ret = -ENETUNREACH;
+			else
+				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_unlock();
		} else if (raced) {
			rds_stats_inc(s_send_lock_queue_raced);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);
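This rcu_read_lock()/rds_destroy_pending()/queue_delayed_work() sequence is the pattern the patch repeats at each enqueue site below. Here is a minimal standalone sketch of how such a guard pairs with a teardown path; the my_* names are hypothetical stand-ins, not RDS APIs, and the teardown side is assumed rather than shown in this diff.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_conn {
	bool			destroy_pending; /* set once teardown begins */
	struct delayed_work	send_w;
};

/* Enqueue side: refuse to queue new work once teardown has started. */
static int my_queue_send(struct workqueue_struct *wq, struct my_conn *conn)
{
	int ret = 0;

	rcu_read_lock();
	if (READ_ONCE(conn->destroy_pending))
		ret = -ENETUNREACH;	/* don't requeue send work */
	else
		queue_delayed_work(wq, &conn->send_w, 1);
	rcu_read_unlock();
	return ret;
}

/* Teardown side: flag the connection, then wait out current readers. */
static void my_teardown(struct my_conn *conn)
{
	WRITE_ONCE(conn->destroy_pending, true);
	synchronize_rcu();
	/* Readers that could have missed the flag have now left their
	 * read-side sections, so no new work can be queued behind us.
	 */
	cancel_delayed_work_sync(&conn->send_w);
}

The 1-jiffy delay matches what the hunks in this file pass to queue_delayed_work().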

--- 697 unchanged lines hidden ---

		goto out;
	}

	if (conn->c_trans->t_mp_capable)
		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
	else
		cpath = &conn->c_path[0];

+	if (rds_destroy_pending(conn)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
	rds_conn_path_connect_if_down(cpath);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,

--- 23 unchanged lines hidden ---

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
-	if (ret == -ENOMEM || ret == -EAGAIN)
-		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+	if (ret == -ENOMEM || ret == -EAGAIN) {
+		ret = 0;
+		rcu_read_lock();
+		if (rds_destroy_pending(cpath->cp_conn))
+			ret = -ENETUNREACH;
+		else
+			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+		rcu_read_unlock();
+	}
+	if (ret)
+		goto out;
	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)

--- 61 unchanged lines hidden ---

			       sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
-	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_lock();
+	if (!rds_destroy_pending(cp->cp_conn))
+		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_unlock();

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;

--- 24 unchanged lines hidden ---
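Separately from this patch, the send-generation comment near the top of rds_send_xmit() describes how concurrent senders avoid a redundant requeue: the holder of the xmit lock bumps a generation counter, and after dropping the lock it only reschedules itself if the generation is still its own. A rough sketch of that idea, with hypothetical my_* names and a my_queue_nonempty() helper standing in for the real map-queued/send-queue checks:

#include <linux/bitops.h>
#include <linux/compiler.h>

#define MY_IN_XMIT	0	/* bit index used as the xmit lock */

struct my_path {
	unsigned long	flags;
	unsigned int	send_gen;	/* bumped only while MY_IN_XMIT is held */
};

static bool my_queue_nonempty(struct my_path *p);	/* hypothetical */

static void my_xmit(struct my_path *p)
{
	unsigned int send_gen;

restart:
	if (test_and_set_bit(MY_IN_XMIT, &p->flags))
		return;		/* another caller is already transmitting */

	/* Sole holder of the bit, so a plain increment is race-free. */
	send_gen = ++p->send_gen;

	/* ... drain the send queue here ... */

	clear_bit(MY_IN_XMIT, &p->flags);

	/*
	 * If the generation still matches, nobody slipped in after our
	 * drain, so work queued meanwhile is ours to pick up. If it
	 * changed, the later holder saw that work and we can stop.
	 */
	smp_mb();	/* order the clear_bit against the re-read below */
	if (send_gen == READ_ONCE(p->send_gen) && my_queue_nonempty(p))
		goto restart;
}

This is why the raced path in the hunk above only bumps s_send_lock_queue_raced instead of requeueing: the racing caller is guaranteed to re-examine the queue itself.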