Lines matching full:conn in net/smc/smc_tx.c
48 if (atomic_read(&smc->conn.sndbuf_space) && sock) { in smc_tx_write_space()
50 SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk); in smc_tx_write_space()
80 struct smc_connection *conn = &smc->conn; in smc_tx_wait() local
92 conn->killed || in smc_tx_wait()
93 conn->local_tx_ctrl.conn_state_flags.peer_done_writing) { in smc_tx_wait()
97 if (smc_cdc_rxed_any_close(conn)) { in smc_tx_wait()
112 if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend) in smc_tx_wait()
118 smc_cdc_rxed_any_close(conn) || in smc_tx_wait()
119 (atomic_read(&conn->sndbuf_space) && in smc_tx_wait()
120 !conn->urg_tx_pend), in smc_tx_wait()
147 struct smc_connection *conn = &smc->conn; in smc_should_autocork() local
150 corking_size = min_t(unsigned int, conn->sndbuf_desc->len >> 1, in smc_should_autocork()
153 if (atomic_read(&conn->cdc_pend_tx_wr) == 0 || in smc_should_autocork()
154 smc_tx_prepared_sends(conn) > corking_size) in smc_should_autocork()
161 struct smc_connection *conn = &smc->conn; in smc_tx_should_cork() local
172 atomic_read(&conn->sndbuf_space)) in smc_tx_should_cork()
185 struct smc_connection *conn = &smc->conn; in smc_tx_sendmsg() local
204 if (len > conn->sndbuf_desc->len) in smc_tx_sendmsg()
205 SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk); in smc_tx_sendmsg()
207 if (len > conn->peer_rmbe_size) in smc_tx_sendmsg()
208 SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk); in smc_tx_sendmsg()
216 conn->killed) in smc_tx_sendmsg()
218 if (smc_cdc_rxed_any_close(conn)) in smc_tx_sendmsg()
222 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; in smc_tx_sendmsg()
224 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { in smc_tx_sendmsg()
235 writespace = atomic_read(&conn->sndbuf_space); in smc_tx_sendmsg()
239 sndbuf_base = conn->sndbuf_desc->cpu_addr; in smc_tx_sendmsg()
240 smc_curs_copy(&prep, &conn->tx_curs_prep, conn); in smc_tx_sendmsg()
244 chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len - in smc_tx_sendmsg()
252 smc_sndbuf_sync_sg_for_device(conn); in smc_tx_sendmsg()
267 smc_sndbuf_sync_sg_for_device(conn); in smc_tx_sendmsg()
269 smc_curs_add(conn->sndbuf_desc->len, &prep, copylen); in smc_tx_sendmsg()
270 smc_curs_copy(&conn->tx_curs_prep, &prep, conn); in smc_tx_sendmsg()
273 atomic_sub(copylen, &conn->sndbuf_space); in smc_tx_sendmsg()
280 conn->urg_tx_pend = true; in smc_tx_sendmsg()
285 smc_tx_sndbuf_nonempty(conn); in smc_tx_sendmsg()
303 int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, in smcd_tx_ism_write() argument
308 rc = smc_ism_write(conn->lgr->smcd, conn->peer_token, in smcd_tx_ism_write()
309 conn->peer_rmbe_idx, signal, conn->tx_off + offset, in smcd_tx_ism_write()
312 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; in smcd_tx_ism_write()
317 static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, in smc_tx_rdma_write() argument
320 struct smc_link_group *lgr = conn->lgr; in smc_tx_rdma_write()
321 struct smc_link *link = conn->lnk; in smc_tx_rdma_write()
327 lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr + in smc_tx_rdma_write()
329 conn->tx_off + in smc_tx_rdma_write()
332 rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey; in smc_tx_rdma_write()
340 static inline void smc_tx_advance_cursors(struct smc_connection *conn, in smc_tx_advance_cursors() argument
345 smc_curs_add(conn->peer_rmbe_size, prod, len); in smc_tx_advance_cursors()
349 atomic_sub(len, &conn->peer_rmbe_space); in smc_tx_advance_cursors()
352 smc_curs_add(conn->sndbuf_desc->len, sent, len); in smc_tx_advance_cursors()
356 static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, in smcr_tx_rdma_writes() argument
361 struct smc_link *link = conn->lnk; in smcr_tx_rdma_writes()
364 sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl); in smcr_tx_rdma_writes()
365 u64 virt_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr; in smcr_tx_rdma_writes()
386 sge[srcchunk].addr = conn->sndbuf_desc->is_vm ? in smcr_tx_rdma_writes()
389 if (conn->sndbuf_desc->is_vm) in smcr_tx_rdma_writes()
391 conn->sndbuf_desc->mr[link->link_idx]->lkey; in smcr_tx_rdma_writes()
395 if (src_off >= conn->sndbuf_desc->len) in smcr_tx_rdma_writes()
396 src_off -= conn->sndbuf_desc->len; in smcr_tx_rdma_writes()
404 rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr); in smcr_tx_rdma_writes()
413 src_len = min_t(int, dst_len, conn->sndbuf_desc->len - in smcr_tx_rdma_writes()
421 static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, in smcd_tx_rdma_writes() argument
431 void *data = conn->sndbuf_desc->cpu_addr + src_off; in smcd_tx_rdma_writes()
433 rc = smcd_tx_ism_write(conn, data, src_len, dst_off + in smcd_tx_rdma_writes()
439 if (src_off >= conn->sndbuf_desc->len) in smcd_tx_rdma_writes()
440 src_off -= conn->sndbuf_desc->len; in smcd_tx_rdma_writes()
454 src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off); in smcd_tx_rdma_writes()
463 static int smc_tx_rdma_writes(struct smc_connection *conn, in smc_tx_rdma_writes() argument
473 smc_curs_copy(&sent, &conn->tx_curs_sent, conn); in smc_tx_rdma_writes()
474 smc_curs_copy(&prep, &conn->tx_curs_prep, conn); in smc_tx_rdma_writes()
476 to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep); in smc_tx_rdma_writes()
482 rmbespace = atomic_read(&conn->peer_rmbe_space); in smc_tx_rdma_writes()
484 struct smc_sock *smc = container_of(conn, struct smc_sock, in smc_tx_rdma_writes()
485 conn); in smc_tx_rdma_writes()
486 SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk); in smc_tx_rdma_writes()
489 smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn); in smc_tx_rdma_writes()
490 smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); in smc_tx_rdma_writes()
493 pflags = &conn->local_tx_ctrl.prod_flags; in smc_tx_rdma_writes()
507 conn->peer_rmbe_size - prod.count, len); in smc_tx_rdma_writes()
516 if (sent.count + dst_len <= conn->sndbuf_desc->len) { in smc_tx_rdma_writes()
521 src_len = conn->sndbuf_desc->len - sent.count; in smc_tx_rdma_writes()
524 if (conn->lgr->is_smcd) in smc_tx_rdma_writes()
525 rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len, in smc_tx_rdma_writes()
528 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, in smc_tx_rdma_writes()
533 if (conn->urg_tx_pend && len == to_send) in smc_tx_rdma_writes()
535 smc_tx_advance_cursors(conn, &prod, &sent, len); in smc_tx_rdma_writes()
537 smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn); in smc_tx_rdma_writes()
539 smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */ in smc_tx_rdma_writes()
547 static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) in smcr_tx_sndbuf_nonempty() argument
549 struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; in smcr_tx_sndbuf_nonempty()
550 struct smc_link *link = conn->lnk; in smcr_tx_sndbuf_nonempty()
558 rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend); in smcr_tx_sndbuf_nonempty()
563 container_of(conn, struct smc_sock, conn); in smcr_tx_sndbuf_nonempty()
567 if (conn->killed) in smcr_tx_sndbuf_nonempty()
570 mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work, in smcr_tx_sndbuf_nonempty()
576 spin_lock_bh(&conn->send_lock); in smcr_tx_sndbuf_nonempty()
577 if (link != conn->lnk) { in smcr_tx_sndbuf_nonempty()
585 rc = smc_tx_rdma_writes(conn, wr_rdma_buf); in smcr_tx_sndbuf_nonempty()
593 rc = smc_cdc_msg_send(conn, wr_buf, pend); in smcr_tx_sndbuf_nonempty()
600 spin_unlock_bh(&conn->send_lock); in smcr_tx_sndbuf_nonempty()
605 static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) in smcd_tx_sndbuf_nonempty() argument
607 struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; in smcd_tx_sndbuf_nonempty()
610 spin_lock_bh(&conn->send_lock); in smcd_tx_sndbuf_nonempty()
612 rc = smc_tx_rdma_writes(conn, NULL); in smcd_tx_sndbuf_nonempty()
614 rc = smcd_cdc_msg_send(conn); in smcd_tx_sndbuf_nonempty()
620 spin_unlock_bh(&conn->send_lock); in smcd_tx_sndbuf_nonempty()
624 static int __smc_tx_sndbuf_nonempty(struct smc_connection *conn) in __smc_tx_sndbuf_nonempty() argument
626 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); in __smc_tx_sndbuf_nonempty()
630 if (unlikely(smc_tx_prepared_sends(conn) <= 0)) in __smc_tx_sndbuf_nonempty()
634 if (unlikely(atomic_read(&conn->peer_rmbe_space) <= 0)) { in __smc_tx_sndbuf_nonempty()
635 SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk); in __smc_tx_sndbuf_nonempty()
639 if (conn->killed || in __smc_tx_sndbuf_nonempty()
640 conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) { in __smc_tx_sndbuf_nonempty()
644 if (conn->lgr->is_smcd) in __smc_tx_sndbuf_nonempty()
645 rc = smcd_tx_sndbuf_nonempty(conn); in __smc_tx_sndbuf_nonempty()
647 rc = smcr_tx_sndbuf_nonempty(conn); in __smc_tx_sndbuf_nonempty()
658 int smc_tx_sndbuf_nonempty(struct smc_connection *conn) in smc_tx_sndbuf_nonempty() argument
666 if (atomic_inc_return(&conn->tx_pushing) > 1) in smc_tx_sndbuf_nonempty()
670 atomic_set(&conn->tx_pushing, 1); in smc_tx_sndbuf_nonempty()
672 rc = __smc_tx_sndbuf_nonempty(conn); in smc_tx_sndbuf_nonempty()
680 if (unlikely(!atomic_dec_and_test(&conn->tx_pushing))) in smc_tx_sndbuf_nonempty()
690 void smc_tx_pending(struct smc_connection *conn) in smc_tx_pending() argument
692 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); in smc_tx_pending()
698 rc = smc_tx_sndbuf_nonempty(conn); in smc_tx_pending()
699 if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked && in smc_tx_pending()
700 !atomic_read(&conn->bytes_to_rcv)) in smc_tx_pending()
701 conn->local_rx_ctrl.prod_flags.write_blocked = 0; in smc_tx_pending()
710 struct smc_connection *conn = container_of(to_delayed_work(work), in smc_tx_work() local
713 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); in smc_tx_work()
716 smc_tx_pending(conn); in smc_tx_work()
720 void smc_tx_consumer_update(struct smc_connection *conn, bool force) in smc_tx_consumer_update() argument
723 int sender_free = conn->rmb_desc->len; in smc_tx_consumer_update()
726 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); in smc_tx_consumer_update()
727 smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn); in smc_tx_consumer_update()
728 to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); in smc_tx_consumer_update()
729 if (to_confirm > conn->rmbe_update_limit) { in smc_tx_consumer_update()
730 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); in smc_tx_consumer_update()
731 sender_free = conn->rmb_desc->len - in smc_tx_consumer_update()
732 smc_curs_diff_large(conn->rmb_desc->len, in smc_tx_consumer_update()
736 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || in smc_tx_consumer_update()
738 ((to_confirm > conn->rmbe_update_limit) && in smc_tx_consumer_update()
739 ((sender_free <= (conn->rmb_desc->len / 2)) || in smc_tx_consumer_update()
740 conn->local_rx_ctrl.prod_flags.write_blocked))) { in smc_tx_consumer_update()
741 if (conn->killed || in smc_tx_consumer_update()
742 conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) in smc_tx_consumer_update()
744 if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && in smc_tx_consumer_update()
745 !conn->killed) { in smc_tx_consumer_update()
746 queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, in smc_tx_consumer_update()
751 if (conn->local_rx_ctrl.prod_flags.write_blocked && in smc_tx_consumer_update()
752 !atomic_read(&conn->bytes_to_rcv)) in smc_tx_consumer_update()
753 conn->local_rx_ctrl.prod_flags.write_blocked = 0; in smc_tx_consumer_update()