Lines Matching +full:recv +full:- +full:empty

14  *      - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
37 #include <linux/dma-mapping.h>
50 struct rds_ib_recv_work *recv; in rds_ib_recv_init_ring() local
53 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) { in rds_ib_recv_init_ring()
56 recv->r_ibinc = NULL; in rds_ib_recv_init_ring()
57 recv->r_frag = NULL; in rds_ib_recv_init_ring()
59 recv->r_wr.next = NULL; in rds_ib_recv_init_ring()
60 recv->r_wr.wr_id = i; in rds_ib_recv_init_ring()
61 recv->r_wr.sg_list = recv->r_sge; in rds_ib_recv_init_ring()
62 recv->r_wr.num_sge = RDS_IB_RECV_SGE; in rds_ib_recv_init_ring()
64 sge = &recv->r_sge[0]; in rds_ib_recv_init_ring()
65 sge->addr = ic->i_recv_hdrs_dma[i]; in rds_ib_recv_init_ring()
66 sge->length = sizeof(struct rds_header); in rds_ib_recv_init_ring()
67 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ring()
69 sge = &recv->r_sge[1]; in rds_ib_recv_init_ring()
70 sge->addr = 0; in rds_ib_recv_init_ring()
71 sge->length = RDS_FRAG_SIZE; in rds_ib_recv_init_ring()
72 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ring()
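
For reference, a minimal stand-alone sketch of the two-SGE layout the loop above builds for every receive slot: the first SGE always points at a fixed-size rds_header slot, the second at an RDS_FRAG_SIZE data buffer that the refill path maps in later. The struct names and sizes below are simplified assumptions, not the kernel's ib_* definitions.

/* recv_slot.c - illustrative only; simplified stand-ins for ib_sge/ib_recv_wr.
 * Build: cc -std=c11 recv_slot.c
 */
#include <stdint.h>
#include <stdio.h>

#define FRAG_SIZE 4096u			/* assumed RDS_FRAG_SIZE */
#define HDR_SIZE  48u			/* assumed sizeof(struct rds_header) */

struct sketch_sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

struct sketch_recv {
	uint64_t wr_id;			/* ring index, recovered at completion */
	struct sketch_sge sge[2];	/* [0] = header, [1] = data fragment */
};

static void init_recv_slot(struct sketch_recv *r, unsigned int i,
			   uint64_t hdr_dma, uint32_t lkey)
{
	r->wr_id = i;
	r->sge[0] = (struct sketch_sge){ .addr = hdr_dma, .length = HDR_SIZE,
					 .lkey = lkey };
	/* data address is left 0 here; the refill path maps a fragment later */
	r->sge[1] = (struct sketch_sge){ .addr = 0, .length = FRAG_SIZE,
					 .lkey = lkey };
}

int main(void)
{
	struct sketch_recv r;

	init_recv_slot(&r, 3, 0x1000, 0x42);
	printf("wr_id=%llu hdr=%u bytes data=%u bytes\n",
	       (unsigned long long)r.wr_id, r.sge[0].length, r.sge[1].length);
	return 0;
}
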
83 struct list_head *from_last = from->prev; in list_splice_entire_tail()
93 tmp = xchg(&cache->xfer, NULL); in rds_ib_cache_xfer_to_ready()
95 if (cache->ready) in rds_ib_cache_xfer_to_ready()
96 list_splice_entire_tail(tmp, cache->ready); in rds_ib_cache_xfer_to_ready()
98 cache->ready = tmp; in rds_ib_cache_xfer_to_ready()
107 cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp); in rds_ib_recv_alloc_cache()
108 if (!cache->percpu) in rds_ib_recv_alloc_cache()
109 return -ENOMEM; in rds_ib_recv_alloc_cache()
112 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
113 head->first = NULL; in rds_ib_recv_alloc_cache()
114 head->count = 0; in rds_ib_recv_alloc_cache()
116 cache->xfer = NULL; in rds_ib_recv_alloc_cache()
117 cache->ready = NULL; in rds_ib_recv_alloc_cache()
126 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp); in rds_ib_recv_alloc_caches()
128 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp); in rds_ib_recv_alloc_caches()
130 free_percpu(ic->i_cache_incs.percpu); in rds_ib_recv_alloc_caches()
143 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
144 if (head->first) { in rds_ib_cache_splice_all_lists()
145 list_splice_entire_tail(head->first, caller_list); in rds_ib_cache_splice_all_lists()
146 head->first = NULL; in rds_ib_cache_splice_all_lists()
150 if (cache->ready) { in rds_ib_cache_splice_all_lists()
151 list_splice_entire_tail(cache->ready, caller_list); in rds_ib_cache_splice_all_lists()
152 cache->ready = NULL; in rds_ib_cache_splice_all_lists()
164 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); in rds_ib_recv_free_caches()
165 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list); in rds_ib_recv_free_caches()
166 free_percpu(ic->i_cache_incs.percpu); in rds_ib_recv_free_caches()
169 list_del(&inc->ii_cache_entry); in rds_ib_recv_free_caches()
170 WARN_ON(!list_empty(&inc->ii_frags)); in rds_ib_recv_free_caches()
175 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); in rds_ib_recv_free_caches()
176 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list); in rds_ib_recv_free_caches()
177 free_percpu(ic->i_cache_frags.percpu); in rds_ib_recv_free_caches()
180 list_del(&frag->f_cache_entry); in rds_ib_recv_free_caches()
181 WARN_ON(!list_empty(&frag->f_item)); in rds_ib_recv_free_caches()
192 /* Recycle frag and attached recv buffer f_sg */
196 rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg)); in rds_ib_frag_free()
198 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags); in rds_ib_frag_free()
199 atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); in rds_ib_frag_free()
209 struct rds_ib_connection *ic = inc->i_conn->c_transport_data; in rds_ib_inc_free()
214 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) { in rds_ib_inc_free()
215 list_del_init(&frag->f_item); in rds_ib_inc_free()
218 BUG_ON(!list_empty(&ibinc->ii_frags)); in rds_ib_inc_free()
221 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs); in rds_ib_inc_free()
225 struct rds_ib_recv_work *recv) in rds_ib_recv_clear_one() argument
227 if (recv->r_ibinc) { in rds_ib_recv_clear_one()
228 rds_inc_put(&recv->r_ibinc->ii_inc); in rds_ib_recv_clear_one()
229 recv->r_ibinc = NULL; in rds_ib_recv_clear_one()
231 if (recv->r_frag) { in rds_ib_recv_clear_one()
232 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE); in rds_ib_recv_clear_one()
233 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_recv_clear_one()
234 recv->r_frag = NULL; in rds_ib_recv_clear_one()
242 for (i = 0; i < ic->i_recv_ring.w_nr; i++) in rds_ib_recv_clear_ring()
243 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]); in rds_ib_recv_clear_ring()
253 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs); in rds_ib_refill_one_inc()
270 INIT_LIST_HEAD(&ibinc->ii_frags); in rds_ib_refill_one_inc()
271 rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr); in rds_ib_refill_one_inc()
283 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags); in rds_ib_refill_one_frag()
286 atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); in rds_ib_refill_one_frag()
293 sg_init_table(&frag->f_sg, 1); in rds_ib_refill_one_frag()
294 ret = rds_page_remainder_alloc(&frag->f_sg, in rds_ib_refill_one_frag()
303 INIT_LIST_HEAD(&frag->f_item); in rds_ib_refill_one_frag()
309 struct rds_ib_recv_work *recv, gfp_t gfp) in rds_ib_recv_refill_one() argument
311 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_refill_one()
313 int ret = -ENOMEM; in rds_ib_recv_refill_one()
322 if (!ic->i_cache_incs.ready) in rds_ib_recv_refill_one()
323 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); in rds_ib_recv_refill_one()
324 if (!ic->i_cache_frags.ready) in rds_ib_recv_refill_one()
325 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); in rds_ib_recv_refill_one()
328 * ibinc was taken from recv if recv contained the start of a message. in rds_ib_recv_refill_one()
331 if (!recv->r_ibinc) { in rds_ib_recv_refill_one()
332 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask); in rds_ib_recv_refill_one()
333 if (!recv->r_ibinc) in rds_ib_recv_refill_one()
337 WARN_ON(recv->r_frag); /* leak! */ in rds_ib_recv_refill_one()
338 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask); in rds_ib_recv_refill_one()
339 if (!recv->r_frag) in rds_ib_recv_refill_one()
342 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, in rds_ib_recv_refill_one()
346 sge = &recv->r_sge[0]; in rds_ib_recv_refill_one()
347 sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs]; in rds_ib_recv_refill_one()
348 sge->length = sizeof(struct rds_header); in rds_ib_recv_refill_one()
350 sge = &recv->r_sge[1]; in rds_ib_recv_refill_one()
351 sge->addr = sg_dma_address(&recv->r_frag->f_sg); in rds_ib_recv_refill_one()
352 sge->length = sg_dma_len(&recv->r_frag->f_sg); in rds_ib_recv_refill_one()
361 return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0; in acquire_refill()
366 clear_bit(RDS_RECV_REFILL, &conn->c_flags); in release_refill()
371 * the system-wide hashed waitqueue buckets in the fast path only to in release_refill()
374 if (waitqueue_active(&conn->c_waitq)) in release_refill()
375 wake_up_all(&conn->c_waitq); in release_refill()
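
A minimal userspace model of the acquire_refill()/release_refill() pattern above, assuming C11 atomics and a pthread condition variable in place of the kernel's bitops and wait queues; all names are hypothetical. The point it illustrates is the waitqueue_active()-style check: the release path only touches the lock and condition variable when a waiter is actually registered, so the common case stays lock-free.

/* refill_gate.c - hypothetical userspace model of acquire/release refill.
 * Build: cc -std=c11 -pthread refill_gate.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool refill_busy;		/* models RDS_RECV_REFILL in c_flags */
static atomic_int  nr_waiters;		/* models waitqueue_active()         */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;

static bool acquire_refill(void)
{
	/* test_and_set_bit(): only the first caller wins the refill duty */
	return !atomic_exchange(&refill_busy, true);
}

static void release_refill(void)
{
	atomic_store(&refill_busy, false);

	/* Only take the (potentially contended) lock when someone is actually
	 * sleeping - the fast path stays lock-free, mirroring the
	 * waitqueue_active() check in the listing above. */
	if (atomic_load(&nr_waiters) > 0) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&waitq);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	if (acquire_refill()) {
		/* ... post receives ... */
		release_refill();
	}
	printf("refill slot free again: %d\n", !atomic_load(&refill_busy));
	return 0;
}
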
385 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_refill()
386 struct rds_ib_recv_work *recv; in rds_ib_recv_refill() local
401 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { in rds_ib_recv_refill()
402 if (pos >= ic->i_recv_ring.w_nr) { in rds_ib_recv_refill()
403 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", in rds_ib_recv_refill()
408 recv = &ic->i_recvs[pos]; in rds_ib_recv_refill()
409 ret = rds_ib_recv_refill_one(conn, recv, gfp); in rds_ib_recv_refill()
415 rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv, in rds_ib_recv_refill()
416 recv->r_ibinc, sg_page(&recv->r_frag->f_sg), in rds_ib_recv_refill()
417 (long)sg_dma_address(&recv->r_frag->f_sg)); in rds_ib_recv_refill()
420 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL); in rds_ib_recv_refill()
422 rds_ib_conn_error(conn, "recv post on " in rds_ib_recv_refill()
424 "reconnecting\n", &conn->c_faddr, in rds_ib_recv_refill()
437 /* We're doing flow control - update the window. */ in rds_ib_recv_refill()
438 if (ic->i_flowctl && posted) in rds_ib_recv_refill()
442 rds_ib_ring_unalloc(&ic->i_recv_ring, 1); in rds_ib_recv_refill()
449 * ring is completely empty. in rds_ib_recv_refill()
458 (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) || in rds_ib_recv_refill()
459 rds_ib_ring_empty(&ic->i_recv_ring))) { in rds_ib_recv_refill()
460 queue_delayed_work(rds_wq, &conn->c_recv_w, 1); in rds_ib_recv_refill()
467 * We want to recycle several types of recv allocations, like incs and frags.
472 * We move it to an intermediate non-percpu list in a lockless manner, with some
476 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
487 chpfirst = __this_cpu_read(cache->percpu->first); in rds_ib_recv_cache_put()
493 __this_cpu_write(cache->percpu->first, new_item); in rds_ib_recv_cache_put()
494 __this_cpu_inc(cache->percpu->count); in rds_ib_recv_cache_put()
496 if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) in rds_ib_recv_cache_put()
500 * Return our per-cpu first list to the cache's xfer by atomically in rds_ib_recv_cache_put()
501 * grabbing the current xfer list, appending it to our per-cpu list, in rds_ib_recv_cache_put()
503 * cache's xfer list as long as it's still empty. in rds_ib_recv_cache_put()
506 old = xchg(&cache->xfer, NULL); in rds_ib_recv_cache_put()
509 old = cmpxchg(&cache->xfer, NULL, chpfirst); in rds_ib_recv_cache_put()
513 __this_cpu_write(cache->percpu->first, NULL); in rds_ib_recv_cache_put()
514 __this_cpu_write(cache->percpu->count, 0); in rds_ib_recv_cache_put()
521 struct list_head *head = cache->ready; in rds_ib_recv_cache_get()
525 cache->ready = head->next; in rds_ib_recv_cache_get()
528 cache->ready = NULL; in rds_ib_recv_cache_get()
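
To make the xchg/cmpxchg hand-off in rds_ib_recv_cache_put() and rds_ib_cache_xfer_to_ready() easier to follow, here is a hypothetical single-file model of the publish/drain protocol, using GCC/Clang __atomic builtins and plain singly linked lists in place of list_head; per-CPU batching and IRQ disabling are omitted, so this is a sketch of the protocol, not the kernel code.

/* recv_cache.c - illustrative model of the batch -> xfer -> ready hand-off.
 * Build: cc -std=c11 recv_cache.c
 */
#include <stddef.h>
#include <stdio.h>

struct item { struct item *next; int val; };

struct cache {
	struct item *xfer;	/* producers race to publish batches here */
	struct item *ready;	/* consumer-private list, refilled from xfer */
};

/* cmpxchg-style helper: install 'newp' only if *ptr is NULL; returns NULL on
 * success, or the value that was already there. */
static struct item *try_install(struct item **ptr, struct item *newp)
{
	struct item *expected = NULL;

	__atomic_compare_exchange_n(ptr, &expected, newp, 0,
				    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
	return expected;
}

/* Producer: publish a whole private batch. If another batch is already
 * published, grab it, splice it onto ours, and retry until the slot is
 * empty for us - mirroring the loop in rds_ib_recv_cache_put(). */
static void publish_batch(struct cache *c, struct item *batch)
{
	struct item *old;

	do {
		old = __atomic_exchange_n(&c->xfer, NULL, __ATOMIC_ACQ_REL);
		if (old) {
			struct item *t = batch;

			while (t->next)
				t = t->next;
			t->next = old;	/* splice the grabbed batch onto ours */
		}
		old = try_install(&c->xfer, batch);
	} while (old);
}

/* Consumer: move everything published so far onto the private ready list,
 * as rds_ib_cache_xfer_to_ready() does. */
static void xfer_to_ready(struct cache *c)
{
	struct item *got = __atomic_exchange_n(&c->xfer, NULL, __ATOMIC_ACQ_REL);
	struct item *t;

	if (!got)
		return;
	if (c->ready) {
		for (t = got; t->next; t = t->next)
			;
		t->next = c->ready;	/* keep older ready items behind new ones */
	}
	c->ready = got;
}

int main(void)
{
	struct cache c = { NULL, NULL };
	struct item a = { NULL, 1 }, b = { &a, 2 };	/* batch: b -> a */

	publish_batch(&c, &b);
	xfer_to_ready(&c);
	for (struct item *t = c.ready; t; t = t->next)
		printf("ready item %d\n", t->val);
	return 0;
}
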
545 frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); in rds_ib_inc_copy_to_user()
546 len = be32_to_cpu(inc->i_hdr.h_len); in rds_ib_inc_copy_to_user()
550 frag = list_entry(frag->f_item.next, in rds_ib_inc_copy_to_user()
555 RDS_FRAG_SIZE - frag_off); in rds_ib_inc_copy_to_user()
556 to_copy = min_t(unsigned long, to_copy, len - copied); in rds_ib_inc_copy_to_user()
560 ret = copy_page_to_iter(sg_page(&frag->f_sg), in rds_ib_inc_copy_to_user()
561 frag->f_sg.offset + frag_off, in rds_ib_inc_copy_to_user()
565 return -EFAULT; in rds_ib_inc_copy_to_user()
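
The copy loop above walks the fragment list and copies min(RDS_FRAG_SIZE - frag_off, len - copied) bytes per step. A small worked sketch of just that arithmetic, with an assumed 4 KiB fragment size and no real copying:

/* copy_chunks.c - worked example of the per-fragment copy sizes used by
 * rds_ib_inc_copy_to_user(); the sizes are assumptions.
 * Build: cc -std=c11 copy_chunks.c
 */
#include <stdio.h>

#define FRAG_SIZE 4096ul	/* assumed RDS_FRAG_SIZE */

int main(void)
{
	unsigned long len = 10000;	/* h_len from the RDS header */
	unsigned long copied = 0, frag_off = 0;

	while (copied < len) {
		unsigned long to_copy = FRAG_SIZE - frag_off;

		if (to_copy > len - copied)
			to_copy = len - copied;

		printf("copy %4lu bytes at frag offset %4lu\n", to_copy, frag_off);

		copied += to_copy;
		frag_off += to_copy;
		if (frag_off == FRAG_SIZE)	/* advance to the next fragment */
			frag_off = 0;
	}
	printf("total copied: %lu of %lu\n", copied, len);
	return 0;
}
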
577 struct ib_send_wr *wr = &ic->i_ack_wr; in rds_ib_recv_init_ack()
578 struct ib_sge *sge = &ic->i_ack_sge; in rds_ib_recv_init_ack()
580 sge->addr = ic->i_ack_dma; in rds_ib_recv_init_ack()
581 sge->length = sizeof(struct rds_header); in rds_ib_recv_init_ack()
582 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ack()
584 wr->sg_list = sge; in rds_ib_recv_init_ack()
585 wr->num_sge = 1; in rds_ib_recv_init_ack()
586 wr->opcode = IB_WR_SEND; in rds_ib_recv_init_ack()
587 wr->wr_id = RDS_IB_ACK_WR_ID; in rds_ib_recv_init_ack()
588 wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_ib_recv_init_ack()
597 * potential issue if another HCA is available for fail-over.
607 * This is implemented by having a long-lived send_wr and sge which point to a
618 spin_lock_irqsave(&ic->i_ack_lock, flags); in rds_ib_set_ack()
619 ic->i_ack_next = seq; in rds_ib_set_ack()
621 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_set_ack()
622 spin_unlock_irqrestore(&ic->i_ack_lock, flags); in rds_ib_set_ack()
630 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_get_ack()
632 spin_lock_irqsave(&ic->i_ack_lock, flags); in rds_ib_get_ack()
633 seq = ic->i_ack_next; in rds_ib_get_ack()
634 spin_unlock_irqrestore(&ic->i_ack_lock, flags); in rds_ib_get_ack()
641 atomic64_set(&ic->i_ack_next, seq); in rds_ib_set_ack()
644 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_set_ack()
650 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_get_ack()
653 return atomic64_read(&ic->i_ack_next); in rds_ib_get_ack()
660 struct rds_header *hdr = ic->i_ack; in rds_ib_send_ack()
668 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma, in rds_ib_send_ack()
671 hdr->h_ack = cpu_to_be64(seq); in rds_ib_send_ack()
672 hdr->h_credit = adv_credits; in rds_ib_send_ack()
674 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma, in rds_ib_send_ack()
677 ic->i_ack_queued = jiffies; in rds_ib_send_ack()
679 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL); in rds_ib_send_ack()
684 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_send_ack()
685 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_send_ack()
689 rds_ib_conn_error(ic->conn, "sending ack failed\n"); in rds_ib_send_ack()
696 * 1. We call rds_ib_attempt_ack from the recv completion handler
697 * to send an ACK-only frame.
709 * - i_ack_flags, which keeps track of whether the ACK WR
711 * - i_ack_next, which is the last sequence number we received
729 * When we get here, we're called from the recv queue handler.
736 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) in rds_ib_attempt_ack()
739 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) { in rds_ib_attempt_ack()
747 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_attempt_ack()
751 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_attempt_ack()
761 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_ack_send_complete()
771 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) in rds_ib_piggyb_ack()
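
A hypothetical userspace model of the two-bit protocol described in the comments above (IB_ACK_REQUESTED: the peer is owed an ack; IB_ACK_IN_FLIGHT: the single ack WR is currently posted), with C11 atomics standing in for the kernel's bitops. Credits and locking are omitted, so this sketches the control flow only, not the implementation.

/* ack_state.c - illustrative ack-flag state machine; names are made up.
 * Build: cc -std=c11 ack_state.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static atomic_bool ack_requested;	/* models IB_ACK_REQUESTED */
static atomic_bool ack_in_flight;	/* models IB_ACK_IN_FLIGHT */
static _Atomic uint64_t ack_next;	/* last sequence number received */

static void post_ack_wr(uint64_t seq)
{
	printf("post ack for seq %llu\n", (unsigned long long)seq);
}

/* recv completion path: try to get the single ack frame on the wire */
static void attempt_ack(void)
{
	if (!atomic_load(&ack_requested))
		return;
	if (atomic_exchange(&ack_in_flight, true)) {
		/* ack WR already posted - it will be retried from the send
		 * completion handler once it finishes */
		return;
	}
	atomic_store(&ack_requested, false);
	post_ack_wr(atomic_load(&ack_next));
}

/* send completion for the ack WR: clear IN_FLIGHT, retry if more arrived */
static void ack_send_complete(void)
{
	atomic_store(&ack_in_flight, false);
	attempt_ack();
}

/* data send path: piggyback the ack and cancel the pending ack-only frame */
static uint64_t piggyback_ack(void)
{
	atomic_store(&ack_requested, false);
	return atomic_load(&ack_next);
}

int main(void)
{
	atomic_store(&ack_next, 42);
	atomic_store(&ack_requested, true);
	attempt_ack();
	ack_send_complete();
	printf("piggyback would carry seq %llu\n",
	       (unsigned long long)piggyback_ack());
	return 0;
}
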
778 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
798 if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES) in rds_ib_cong_recv()
801 map = conn->c_fcong; in rds_ib_cong_recv()
805 frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); in rds_ib_cong_recv()
814 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); in rds_ib_cong_recv()
817 addr = kmap_atomic(sg_page(&frag->f_sg)); in rds_ib_cong_recv()
819 src = addr + frag->f_sg.offset + frag_off; in rds_ib_cong_recv()
820 dst = (void *)map->m_page_addrs[map_page] + map_off; in rds_ib_cong_recv()
839 frag = list_entry(frag->f_item.next, in rds_ib_cong_recv()
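
rds_ib_cong_recv() copies the congestion bitmap out of RDS_FRAG_SIZE fragments into the map's pages, clamping each chunk to whichever boundary comes first. A small worked sketch of that clamping, assuming an 8 KiB map and 4 KiB fragments and pages; nothing is actually copied.

/* cong_copy.c - worked example of the chunking in rds_ib_cong_recv();
 * all sizes below are assumptions.
 * Build: cc -std=c11 cong_copy.c
 */
#include <stdio.h>

#define FRAG_SIZE	4096ul	/* assumed RDS_FRAG_SIZE      */
#define PAGE_SZ		4096ul	/* assumed PAGE_SIZE          */
#define CONG_MAP_BYTES	8192ul	/* assumed RDS_CONG_MAP_BYTES */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long copied = 0, frag_off = 0, map_off = 0;
	unsigned long frag = 0, map_page = 0;

	while (copied < CONG_MAP_BYTES) {
		unsigned long to_copy = min_ul(FRAG_SIZE - frag_off,
					       PAGE_SZ - map_off);

		printf("frag %lu +%lu -> map page %lu +%lu : %lu bytes\n",
		       frag, frag_off, map_page, map_off, to_copy);

		copied += to_copy;
		frag_off += to_copy;
		map_off += to_copy;
		if (frag_off == FRAG_SIZE) {	/* next fragment in ii_frags */
			frag_off = 0;
			frag++;
		}
		if (map_off == PAGE_SZ) {	/* next page of m_page_addrs */
			map_off = 0;
			map_page++;
		}
	}
	return 0;
}
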
850 struct rds_ib_recv_work *recv, u32 data_len, in rds_ib_process_recv() argument
853 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_process_recv()
854 struct rds_ib_incoming *ibinc = ic->i_ibinc; in rds_ib_process_recv()
856 dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs]; in rds_ib_process_recv()
860 rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv, in rds_ib_process_recv()
868 &conn->c_faddr); in rds_ib_process_recv()
871 data_len -= sizeof(struct rds_header); in rds_ib_process_recv()
873 ihdr = ic->i_recv_hdrs[recv - ic->i_recvs]; in rds_ib_process_recv()
875 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr, in rds_ib_process_recv()
880 "from %pI6c has corrupted header - " in rds_ib_process_recv()
882 &conn->c_faddr); in rds_ib_process_recv()
888 state->ack_recv = be64_to_cpu(ihdr->h_ack); in rds_ib_process_recv()
889 state->ack_recv_valid = 1; in rds_ib_process_recv()
892 if (ihdr->h_credit) in rds_ib_process_recv()
893 rds_ib_send_add_credits(conn, ihdr->h_credit); in rds_ib_process_recv()
895 if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) { in rds_ib_process_recv()
896 /* This is an ACK-only packet. The fact that it gets in rds_ib_process_recv()
905 * page ref ourselves. We can't just leave the page on the recv in rds_ib_process_recv()
906 * because that confuses the dma mapping of pages and each recv's use in rds_ib_process_recv()
911 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_process_recv()
912 recv->r_frag = NULL; in rds_ib_process_recv()
923 ibinc = recv->r_ibinc; in rds_ib_process_recv()
924 recv->r_ibinc = NULL; in rds_ib_process_recv()
925 ic->i_ibinc = ibinc; in rds_ib_process_recv()
927 hdr = &ibinc->ii_inc.i_hdr; in rds_ib_process_recv()
928 ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] = in rds_ib_process_recv()
931 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len); in rds_ib_process_recv()
932 ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] = in rds_ib_process_recv()
936 ic->i_recv_data_rem, hdr->h_flags); in rds_ib_process_recv()
938 hdr = &ibinc->ii_inc.i_hdr; in rds_ib_process_recv()
941 if (hdr->h_sequence != ihdr->h_sequence || in rds_ib_process_recv()
942 hdr->h_len != ihdr->h_len || in rds_ib_process_recv()
943 hdr->h_sport != ihdr->h_sport || in rds_ib_process_recv()
944 hdr->h_dport != ihdr->h_dport) { in rds_ib_process_recv()
951 list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags); in rds_ib_process_recv()
952 recv->r_frag = NULL; in rds_ib_process_recv()
954 if (ic->i_recv_data_rem > RDS_FRAG_SIZE) in rds_ib_process_recv()
955 ic->i_recv_data_rem -= RDS_FRAG_SIZE; in rds_ib_process_recv()
957 ic->i_recv_data_rem = 0; in rds_ib_process_recv()
958 ic->i_ibinc = NULL; in rds_ib_process_recv()
960 if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) { in rds_ib_process_recv()
963 rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr, in rds_ib_process_recv()
964 &ibinc->ii_inc, GFP_ATOMIC); in rds_ib_process_recv()
965 state->ack_next = be64_to_cpu(hdr->h_sequence); in rds_ib_process_recv()
966 state->ack_next_valid = 1; in rds_ib_process_recv()
972 if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) { in rds_ib_process_recv()
974 state->ack_required = 1; in rds_ib_process_recv()
977 rds_inc_put(&ibinc->ii_inc); in rds_ib_process_recv()
980 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr, in rds_ib_process_recv()
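
Pulling the header/data bookkeeping of rds_ib_process_recv() together: the first fragment of a message donates the recv's ibinc and sets i_recv_data_rem from h_len, then every fragment is appended and i_recv_data_rem is reduced by up to RDS_FRAG_SIZE until the message is complete and delivered. A compact, hypothetical model of just that accounting, with an assumed fragment size:

/* reassembly.c - illustrative model of the i_recv_data_rem accounting in
 * rds_ib_process_recv(); nothing is actually delivered.
 * Build: cc -std=c11 reassembly.c
 */
#include <stdio.h>

#define FRAG_SIZE 4096u		/* assumed RDS_FRAG_SIZE */

struct conn_state {
	unsigned int data_rem;	/* models ic->i_recv_data_rem */
	unsigned int nfrags;	/* fragments queued on the current message */
};

/* One recv completion whose header carries 'hdr_len' (only meaningful on the
 * first fragment of a message). Returns 1 when the message completes. */
static int process_frag(struct conn_state *ic, unsigned int hdr_len)
{
	if (ic->data_rem == 0) {		/* start of a new message */
		ic->data_rem = hdr_len;
		ic->nfrags = 0;
	}
	ic->nfrags++;				/* list_add_tail(frag, ii_frags) */

	if (ic->data_rem > FRAG_SIZE) {
		ic->data_rem -= FRAG_SIZE;
		return 0;
	}
	ic->data_rem = 0;			/* last fragment: deliver the inc */
	printf("message complete in %u fragment(s)\n", ic->nfrags);
	return 1;
}

int main(void)
{
	struct conn_state ic = { 0, 0 };
	unsigned int hdr_len = 10000;		/* h_len of the incoming message */

	while (!process_frag(&ic, hdr_len))
		;
	return 0;
}
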
988 struct rds_connection *conn = ic->conn; in rds_ib_recv_cqe_handler()
989 struct rds_ib_recv_work *recv; in rds_ib_recv_cqe_handler() local
992 (unsigned long long)wc->wr_id, wc->status, in rds_ib_recv_cqe_handler()
993 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_recv_cqe_handler()
994 be32_to_cpu(wc->ex.imm_data)); in rds_ib_recv_cqe_handler()
997 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)]; in rds_ib_recv_cqe_handler()
998 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, in rds_ib_recv_cqe_handler()
1002 * to get a recv completion _before_ the rdmacm ESTABLISHED in rds_ib_recv_cqe_handler()
1005 if (wc->status == IB_WC_SUCCESS) { in rds_ib_recv_cqe_handler()
1006 rds_ib_process_recv(conn, recv, wc->byte_len, state); in rds_ib_recv_cqe_handler()
1010 …rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x,… in rds_ib_recv_cqe_handler()
1011 &conn->c_laddr, &conn->c_faddr, in rds_ib_recv_cqe_handler()
1012 conn->c_tos, wc->status, in rds_ib_recv_cqe_handler()
1013 ib_wc_status_msg(wc->status), in rds_ib_recv_cqe_handler()
1014 wc->vendor_err); in rds_ib_recv_cqe_handler()
1024 if (recv->r_frag) { in rds_ib_recv_cqe_handler()
1025 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_recv_cqe_handler()
1026 recv->r_frag = NULL; in rds_ib_recv_cqe_handler()
1028 rds_ib_ring_free(&ic->i_recv_ring, 1); in rds_ib_recv_cqe_handler()
1030 /* If we ever end up with a really empty receive ring, we're in rds_ib_recv_cqe_handler()
1033 if (rds_ib_ring_empty(&ic->i_recv_ring)) in rds_ib_recv_cqe_handler()
1036 if (rds_ib_ring_low(&ic->i_recv_ring)) { in rds_ib_recv_cqe_handler()
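
The ring_empty()/ring_low() checks above decide whether the completion path must refill synchronously or can defer to the worker. A hypothetical model of that work-ring accounting; the real counters live in net/rds/ib_ring.c, and the "low" threshold used here is an assumption.

/* recv_ring.c - illustrative recv work-ring accounting.
 * Build: cc -std=c11 recv_ring.c
 */
#include <stdbool.h>
#include <stdio.h>

struct work_ring {
	unsigned int w_nr;		/* total entries                        */
	unsigned int alloc_ctr;		/* entries handed out (posted recvs)    */
	unsigned int free_ctr;		/* entries completed and returned       */
};

static unsigned int ring_used(const struct work_ring *r)
{
	return r->alloc_ctr - r->free_ctr;	/* wraps safely, unsigned math */
}

static bool ring_alloc(struct work_ring *r, unsigned int *pos)
{
	if (ring_used(r) >= r->w_nr)
		return false;			/* ring full, nothing to post into */
	*pos = r->alloc_ctr++ % r->w_nr;
	return true;
}

static void ring_free(struct work_ring *r, unsigned int n)
{
	r->free_ctr += n;
}

/* "empty" = no receives posted at all: the dangerous RNR case */
static bool ring_empty(const struct work_ring *r)
{
	return ring_used(r) == 0;
}

/* "low" = posted receives have fallen under an (assumed) refill threshold */
static bool ring_low(const struct work_ring *r)
{
	return ring_used(r) <= r->w_nr / 2;
}

int main(void)
{
	struct work_ring r = { .w_nr = 8 };
	unsigned int pos;

	while (ring_alloc(&r, &pos))		/* refill fills the ring */
		;
	ring_free(&r, 7);			/* completions drain it */
	printf("used=%u low=%d empty=%d\n", ring_used(&r), ring_low(&r),
	       ring_empty(&r));
	return 0;
}
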
1044 struct rds_connection *conn = cp->cp_conn; in rds_ib_recv_path()
1045 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_path()
1060 int ret = -ENOMEM; in rds_ib_recv_init()
1062 /* Default to 30% of all available RAM for recv memory */ in rds_ib_recv_init()
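
A small userspace analogue of sizing a receive-memory cap from total RAM, as the "30% of all available RAM" comment describes; the exact formula used by rds_ib_recv_init() is not reproduced here, and the fragment size is an assumption.

/* recv_budget.c - hypothetical "30% of RAM" cap calculation.
 * Build: cc -std=c11 recv_budget.c
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long pages = sysconf(_SC_PHYS_PAGES);
	long page_size = sysconf(_SC_PAGE_SIZE);
	const long frag_size = 4096;		/* assumed RDS_FRAG_SIZE */

	/* cap expressed in receive fragments rather than bytes */
	long long max_frags = (long long)pages * page_size * 3 / 10 / frag_size;

	printf("recv budget: %lld frags (~%lld MiB)\n",
	       max_frags, max_frags * frag_size >> 20);
	return 0;
}
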