Lines Matching +full:no +full:- +full:memory +full:- +full:wc
1 // SPDX-License-Identifier: GPL-2.0-or-later
65 /* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
77 /* No need to retry on Receiver Not Ready since SMBD manages credits */
82 * as defined in [MS-SMBD] 3.1.1.1
94 /* The maximum fragmented upper-layer payload receive size supported */
97 /* The maximum single-message size which can be received */
168 if (info->transport_status == SMBD_CONNECTED) { in smbd_disconnect_rdma_work()
169 info->transport_status = SMBD_DISCONNECTING; in smbd_disconnect_rdma_work()
170 rdma_disconnect(info->id); in smbd_disconnect_rdma_work()
176 queue_work(info->workqueue, &info->disconnect_work); in smbd_disconnect_rdma_connection()
183 struct smbd_connection *info = id->context; in smbd_conn_upcall()
186 event->event, event->status); in smbd_conn_upcall()
188 switch (event->event) { in smbd_conn_upcall()
191 info->ri_rc = 0; in smbd_conn_upcall()
192 complete(&info->ri_done); in smbd_conn_upcall()
196 info->ri_rc = -EHOSTUNREACH; in smbd_conn_upcall()
197 complete(&info->ri_done); in smbd_conn_upcall()
201 info->ri_rc = -ENETUNREACH; in smbd_conn_upcall()
202 complete(&info->ri_done); in smbd_conn_upcall()
206 log_rdma_event(INFO, "connected event=%d\n", event->event); in smbd_conn_upcall()
207 info->transport_status = SMBD_CONNECTED; in smbd_conn_upcall()
208 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
214 log_rdma_event(INFO, "connecting failed event=%d\n", event->event); in smbd_conn_upcall()
215 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
216 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
222 if (info->transport_status == SMBD_NEGOTIATE_FAILED) { in smbd_conn_upcall()
223 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
224 wake_up(&info->conn_wait); in smbd_conn_upcall()
228 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
229 wake_up_interruptible(&info->disconn_wait); in smbd_conn_upcall()
230 wake_up_interruptible(&info->wait_reassembly_queue); in smbd_conn_upcall()
231 wake_up_interruptible_all(&info->wait_send_queue); in smbd_conn_upcall()
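/*
 * Note on the wake-ups above: once transport_status is set to
 * SMBD_DISCONNECTED, these calls unblock smbd_destroy() (waiting on
 * disconn_wait), readers blocked in smbd_recv_buf() (waiting on
 * wait_reassembly_queue) and senders blocked in smbd_post_send_iter()
 * (waiting on wait_send_queue), so they can observe the status change
 * and fail out.
 */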
248 ib_event_msg(event->event), event->device->name, info); in smbd_qp_async_error_upcall()
250 switch (event->event) { in smbd_qp_async_error_upcall()
263 return (void *)request->packet; in smbd_request_payload()
268 return (void *)response->packet; in smbd_response_payload()
272 static void send_done(struct ib_cq *cq, struct ib_wc *wc) in send_done() argument
276 container_of(wc->wr_cqe, struct smbd_request, cqe); in send_done()
278 log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", in send_done()
279 request, wc->status); in send_done()
281 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { in send_done()
282 log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", in send_done()
283 wc->status, wc->opcode); in send_done()
284 smbd_disconnect_rdma_connection(request->info); in send_done()
287 for (i = 0; i < request->num_sge; i++) in send_done()
288 ib_dma_unmap_single(request->info->id->device, in send_done()
289 request->sge[i].addr, in send_done()
290 request->sge[i].length, in send_done()
293 if (atomic_dec_and_test(&request->info->send_pending)) in send_done()
294 wake_up(&request->info->wait_send_pending); in send_done()
296 wake_up(&request->info->wait_post_send); in send_done()
298 mempool_free(request, request->info->request_mempool); in send_done()
304 resp->min_version, resp->max_version, in dump_smbd_negotiate_resp()
305 resp->negotiated_version, resp->credits_requested, in dump_smbd_negotiate_resp()
306 resp->credits_granted, resp->status, in dump_smbd_negotiate_resp()
307 resp->max_readwrite_size, resp->preferred_send_size, in dump_smbd_negotiate_resp()
308 resp->max_receive_size, resp->max_fragmented_size); in dump_smbd_negotiate_resp()
312 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
319 struct smbd_connection *info = response->info; in process_negotiation_response()
328 if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { in process_negotiation_response()
330 le16_to_cpu(packet->negotiated_version)); in process_negotiation_response()
333 info->protocol = le16_to_cpu(packet->negotiated_version); in process_negotiation_response()
335 if (packet->credits_requested == 0) { in process_negotiation_response()
339 info->receive_credit_target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
341 if (packet->credits_granted == 0) { in process_negotiation_response()
345 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
347 atomic_set(&info->receive_credits, 0); in process_negotiation_response()
349 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { in process_negotiation_response()
351 le32_to_cpu(packet->preferred_send_size)); in process_negotiation_response()
354 info->max_receive_size = le32_to_cpu(packet->preferred_send_size); in process_negotiation_response()
356 if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { in process_negotiation_response()
358 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
361 info->max_send_size = min_t(int, info->max_send_size, in process_negotiation_response()
362 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
364 if (le32_to_cpu(packet->max_fragmented_size) < in process_negotiation_response()
367 le32_to_cpu(packet->max_fragmented_size)); in process_negotiation_response()
370 info->max_fragmented_send_size = in process_negotiation_response()
371 le32_to_cpu(packet->max_fragmented_size); in process_negotiation_response()
372 info->rdma_readwrite_threshold = in process_negotiation_response()
373 rdma_readwrite_threshold > info->max_fragmented_send_size ? in process_negotiation_response()
374 info->max_fragmented_send_size : in process_negotiation_response()
378 info->max_readwrite_size = min_t(u32, in process_negotiation_response()
379 le32_to_cpu(packet->max_readwrite_size), in process_negotiation_response()
380 info->max_frmr_depth * PAGE_SIZE); in process_negotiation_response()
381 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; in process_negotiation_response()
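/*
 * Note: the two assignments above clamp the peer-advertised
 * max_readwrite_size to what a single fast-registration MR can cover
 * (max_frmr_depth pages), then recompute max_frmr_depth from the clamped
 * value so one RDMA read/write request never exceeds either limit.
 */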
396 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_credits()
397 wake_up(&info->wait_receive_queues); in smbd_post_send_credits()
401 if (info->receive_credit_target > in smbd_post_send_credits()
402 atomic_read(&info->receive_credits)) { in smbd_post_send_credits()
417 response->type = SMBD_TRANSFER_DATA; in smbd_post_send_credits()
418 response->first_segment = false; in smbd_post_send_credits()
431 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_credits()
432 info->new_credits_offered += ret; in smbd_post_send_credits()
433 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_credits()
435 /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ in smbd_post_send_credits()
436 info->send_immediate = true; in smbd_post_send_credits()
437 if (atomic_read(&info->receive_credits) < in smbd_post_send_credits()
438 info->receive_credit_target - 1) { in smbd_post_send_credits()
439 if (info->keep_alive_requested == KEEP_ALIVE_PENDING || in smbd_post_send_credits()
440 info->send_immediate) { in smbd_post_send_credits()
448 static void recv_done(struct ib_cq *cq, struct ib_wc *wc) in recv_done() argument
452 container_of(wc->wr_cqe, struct smbd_response, cqe); in recv_done()
453 struct smbd_connection *info = response->info; in recv_done()
456 log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", in recv_done()
457 response, response->type, wc->status, wc->opcode, in recv_done()
458 wc->byte_len, wc->pkey_index); in recv_done()
460 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { in recv_done()
461 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", in recv_done()
462 wc->status, wc->opcode); in recv_done()
468 wc->qp->device, in recv_done()
469 response->sge.addr, in recv_done()
470 response->sge.length, in recv_done()
473 switch (response->type) { in recv_done()
477 info->full_packet_received = true; in recv_done()
478 info->negotiate_done = in recv_done()
479 process_negotiation_response(response, wc->byte_len); in recv_done()
480 complete(&info->negotiate_completion); in recv_done()
486 data_length = le32_to_cpu(data_transfer->data_length); in recv_done()
493 if (info->full_packet_received) in recv_done()
494 response->first_segment = true; in recv_done()
496 if (le32_to_cpu(data_transfer->remaining_data_length)) in recv_done()
497 info->full_packet_received = false; in recv_done()
499 info->full_packet_received = true; in recv_done()
509 wake_up_interruptible(&info->wait_reassembly_queue); in recv_done()
511 atomic_dec(&info->receive_credits); in recv_done()
512 info->receive_credit_target = in recv_done()
513 le16_to_cpu(data_transfer->credits_requested); in recv_done()
514 if (le16_to_cpu(data_transfer->credits_granted)) { in recv_done()
515 atomic_add(le16_to_cpu(data_transfer->credits_granted), in recv_done()
516 &info->send_credits); in recv_done()
521 wake_up_interruptible(&info->wait_send_queue); in recv_done()
525 le16_to_cpu(data_transfer->flags), in recv_done()
526 le32_to_cpu(data_transfer->data_offset), in recv_done()
527 le32_to_cpu(data_transfer->data_length), in recv_done()
528 le32_to_cpu(data_transfer->remaining_data_length)); in recv_done()
531 info->keep_alive_requested = KEEP_ALIVE_NONE; in recv_done()
532 if (le16_to_cpu(data_transfer->flags) & in recv_done()
534 info->keep_alive_requested = KEEP_ALIVE_PENDING; in recv_done()
541 "unexpected response type=%d\n", response->type); in recv_done()
564 if (dstaddr->sa_family == AF_INET6) in smbd_create_id()
565 sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port; in smbd_create_id()
567 sport = &((struct sockaddr_in *)dstaddr)->sin_port; in smbd_create_id()
571 init_completion(&info->ri_done); in smbd_create_id()
572 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
581 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
582 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
587 rc = info->ri_rc; in smbd_create_id()
593 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
600 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
601 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
606 rc = info->ri_rc; in smbd_create_id()
626 if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) in frwr_is_supported()
628 if (attrs->max_fast_reg_page_list_len == 0) in frwr_is_supported()
639 info->id = smbd_create_id(info, dstaddr, port); in smbd_ia_open()
640 if (IS_ERR(info->id)) { in smbd_ia_open()
641 rc = PTR_ERR(info->id); in smbd_ia_open()
645 if (!frwr_is_supported(&info->id->device->attrs)) { in smbd_ia_open()
648 info->id->device->attrs.device_cap_flags, in smbd_ia_open()
649 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
650 rc = -EPROTONOSUPPORT; in smbd_ia_open()
653 info->max_frmr_depth = min_t(int, in smbd_ia_open()
655 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
656 info->mr_type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
657 if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in smbd_ia_open()
658 info->mr_type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
660 info->pd = ib_alloc_pd(info->id->device, 0); in smbd_ia_open()
661 if (IS_ERR(info->pd)) { in smbd_ia_open()
662 rc = PTR_ERR(info->pd); in smbd_ia_open()
670 rdma_destroy_id(info->id); in smbd_ia_open()
671 info->id = NULL; in smbd_ia_open()
679 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
686 int rc = -ENOMEM; in smbd_post_send_negotiate_req()
690 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_negotiate_req()
694 request->info = info; in smbd_post_send_negotiate_req()
697 packet->min_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
698 packet->max_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
699 packet->reserved = 0; in smbd_post_send_negotiate_req()
700 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_negotiate_req()
701 packet->preferred_send_size = cpu_to_le32(info->max_send_size); in smbd_post_send_negotiate_req()
702 packet->max_receive_size = cpu_to_le32(info->max_receive_size); in smbd_post_send_negotiate_req()
703 packet->max_fragmented_size = in smbd_post_send_negotiate_req()
704 cpu_to_le32(info->max_fragmented_recv_size); in smbd_post_send_negotiate_req()
706 request->num_sge = 1; in smbd_post_send_negotiate_req()
707 request->sge[0].addr = ib_dma_map_single( in smbd_post_send_negotiate_req()
708 info->id->device, (void *)packet, in smbd_post_send_negotiate_req()
710 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
711 rc = -EIO; in smbd_post_send_negotiate_req()
715 request->sge[0].length = sizeof(*packet); in smbd_post_send_negotiate_req()
716 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_negotiate_req()
719 info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
720 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
722 request->cqe.done = send_done; in smbd_post_send_negotiate_req()
725 send_wr.wr_cqe = &request->cqe; in smbd_post_send_negotiate_req()
726 send_wr.sg_list = request->sge; in smbd_post_send_negotiate_req()
727 send_wr.num_sge = request->num_sge; in smbd_post_send_negotiate_req()
732 request->sge[0].addr, in smbd_post_send_negotiate_req()
733 request->sge[0].length, request->sge[0].lkey); in smbd_post_send_negotiate_req()
735 atomic_inc(&info->send_pending); in smbd_post_send_negotiate_req()
736 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
742 atomic_dec(&info->send_pending); in smbd_post_send_negotiate_req()
743 ib_dma_unmap_single(info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
744 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
749 mempool_free(request, info->request_mempool); in smbd_post_send_negotiate_req()
755 * This implements [MS-SMBD] 3.1.5.9
765 spin_lock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
766 new_credits = info->new_credits_offered; in manage_credits_prior_sending()
767 info->new_credits_offered = 0; in manage_credits_prior_sending()
768 spin_unlock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
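/*
 * Credit accounting tie-in: new_credits_offered is accumulated under
 * lock_new_credits_offered by smbd_post_send_credits() as receive buffers
 * are (re)posted; the code above drains that counter and returns it so
 * smbd_post_send_iter() can advertise it as credits_granted in the
 * outgoing packet (and add it to receive_credits).
 */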
784 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { in manage_keep_alive_before_sending()
785 info->keep_alive_requested = KEEP_ALIVE_SENT; in manage_keep_alive_before_sending()
798 for (i = 0; i < request->num_sge; i++) { in smbd_post_send()
801 i, request->sge[i].addr, request->sge[i].length); in smbd_post_send()
803 info->id->device, in smbd_post_send()
804 request->sge[i].addr, in smbd_post_send()
805 request->sge[i].length, in smbd_post_send()
809 request->cqe.done = send_done; in smbd_post_send()
812 send_wr.wr_cqe = &request->cqe; in smbd_post_send()
813 send_wr.sg_list = request->sge; in smbd_post_send()
814 send_wr.num_sge = request->num_sge; in smbd_post_send()
818 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send()
822 rc = -EAGAIN; in smbd_post_send()
825 mod_delayed_work(info->workqueue, &info->idle_timer_work, in smbd_post_send()
826 info->keep_alive_interval*HZ); in smbd_post_send()
844 rc = wait_event_interruptible(info->wait_send_queue, in smbd_post_send_iter()
845 atomic_read(&info->send_credits) > 0 || in smbd_post_send_iter()
846 info->transport_status != SMBD_CONNECTED); in smbd_post_send_iter()
850 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_iter()
852 rc = -EAGAIN; in smbd_post_send_iter()
855 if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { in smbd_post_send_iter()
856 atomic_inc(&info->send_credits); in smbd_post_send_iter()
861 wait_event(info->wait_post_send, in smbd_post_send_iter()
862 atomic_read(&info->send_pending) < info->send_credit_target || in smbd_post_send_iter()
863 info->transport_status != SMBD_CONNECTED); in smbd_post_send_iter()
865 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_iter()
867 rc = -EAGAIN; in smbd_post_send_iter()
871 if (unlikely(atomic_inc_return(&info->send_pending) > in smbd_post_send_iter()
872 info->send_credit_target)) { in smbd_post_send_iter()
873 atomic_dec(&info->send_pending); in smbd_post_send_iter()
877 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_iter()
879 rc = -ENOMEM; in smbd_post_send_iter()
883 request->info = info; in smbd_post_send_iter()
884 memset(request->sge, 0, sizeof(request->sge)); in smbd_post_send_iter()
891 .sge = request->sge, in smbd_post_send_iter()
892 .device = info->id->device, in smbd_post_send_iter()
893 .local_dma_lkey = info->pd->local_dma_lkey, in smbd_post_send_iter()
902 request->num_sge = extract.nr_sge; in smbd_post_send_iter()
903 *_remaining_data_length -= data_length; in smbd_post_send_iter()
906 request->num_sge = 1; in smbd_post_send_iter()
911 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_iter()
914 atomic_add(new_credits, &info->receive_credits); in smbd_post_send_iter()
915 packet->credits_granted = cpu_to_le16(new_credits); in smbd_post_send_iter()
917 info->send_immediate = false; in smbd_post_send_iter()
919 packet->flags = 0; in smbd_post_send_iter()
921 packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); in smbd_post_send_iter()
923 packet->reserved = 0; in smbd_post_send_iter()
925 packet->data_offset = 0; in smbd_post_send_iter()
927 packet->data_offset = cpu_to_le32(24); in smbd_post_send_iter()
928 packet->data_length = cpu_to_le32(data_length); in smbd_post_send_iter()
929 packet->remaining_data_length = cpu_to_le32(*_remaining_data_length); in smbd_post_send_iter()
930 packet->padding = 0; in smbd_post_send_iter()
933 le16_to_cpu(packet->credits_requested), in smbd_post_send_iter()
934 le16_to_cpu(packet->credits_granted), in smbd_post_send_iter()
935 le32_to_cpu(packet->data_offset), in smbd_post_send_iter()
936 le32_to_cpu(packet->data_length), in smbd_post_send_iter()
937 le32_to_cpu(packet->remaining_data_length)); in smbd_post_send_iter()
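/*
 * For reference, the fields being logged above make up the 24-byte SMBD
 * data transfer header from [MS-SMBD] 2.2.3 (mirrored by struct
 * smbd_data_transfer in the client's smbdirect.h), which is why
 * data_offset is set to 24 whenever a payload follows:
 *
 *	struct smbd_data_transfer {
 *		__le16 credits_requested;
 *		__le16 credits_granted;
 *		__le16 flags;
 *		__le16 reserved;
 *		__le32 remaining_data_length;
 *		__le32 data_offset;
 *		__le32 data_length;
 *		__le32 padding;
 *		__u8   buffer[];
 *	} __packed;
 */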
945 request->sge[0].addr = ib_dma_map_single(info->id->device, in smbd_post_send_iter()
949 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_iter()
950 rc = -EIO; in smbd_post_send_iter()
951 request->sge[0].addr = 0; in smbd_post_send_iter()
955 request->sge[0].length = header_length; in smbd_post_send_iter()
956 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_iter()
963 for (i = 0; i < request->num_sge; i++) in smbd_post_send_iter()
964 if (request->sge[i].addr) in smbd_post_send_iter()
965 ib_dma_unmap_single(info->id->device, in smbd_post_send_iter()
966 request->sge[i].addr, in smbd_post_send_iter()
967 request->sge[i].length, in smbd_post_send_iter()
969 mempool_free(request, info->request_mempool); in smbd_post_send_iter()
972 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_iter()
973 info->new_credits_offered += new_credits; in smbd_post_send_iter()
974 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_iter()
975 atomic_sub(new_credits, &info->receive_credits); in smbd_post_send_iter()
978 if (atomic_dec_and_test(&info->send_pending)) in smbd_post_send_iter()
979 wake_up(&info->wait_send_pending); in smbd_post_send_iter()
983 atomic_inc(&info->send_credits); in smbd_post_send_iter()
992 * while there is no upper layer payload to send at the time
998 info->count_send_empty++; in smbd_post_send_empty()
1011 int rc = -EIO; in smbd_post_recv()
1013 response->sge.addr = ib_dma_map_single( in smbd_post_recv()
1014 info->id->device, response->packet, in smbd_post_recv()
1015 info->max_receive_size, DMA_FROM_DEVICE); in smbd_post_recv()
1016 if (ib_dma_mapping_error(info->id->device, response->sge.addr)) in smbd_post_recv()
1019 response->sge.length = info->max_receive_size; in smbd_post_recv()
1020 response->sge.lkey = info->pd->local_dma_lkey; in smbd_post_recv()
1022 response->cqe.done = recv_done; in smbd_post_recv()
1024 recv_wr.wr_cqe = &response->cqe; in smbd_post_recv()
1026 recv_wr.sg_list = &response->sge; in smbd_post_recv()
1029 rc = ib_post_recv(info->id->qp, &recv_wr, NULL); in smbd_post_recv()
1031 ib_dma_unmap_single(info->id->device, response->sge.addr, in smbd_post_recv()
1032 response->sge.length, DMA_FROM_DEVICE); in smbd_post_recv()
1040 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
1046 response->type = SMBD_NEGOTIATE_RESP; in smbd_negotiate()
1049 rc, response->sge.addr, in smbd_negotiate()
1050 response->sge.length, response->sge.lkey); in smbd_negotiate()
1054 init_completion(&info->negotiate_completion); in smbd_negotiate()
1055 info->negotiate_done = false; in smbd_negotiate()
1061 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); in smbd_negotiate()
1064 if (info->negotiate_done) in smbd_negotiate()
1068 rc = -ETIMEDOUT; in smbd_negotiate()
1069 else if (rc == -ERESTARTSYS) in smbd_negotiate()
1070 rc = -EINTR; in smbd_negotiate()
1072 rc = -ENOTCONN; in smbd_negotiate()
1080 spin_lock(&info->empty_packet_queue_lock); in put_empty_packet()
1081 list_add_tail(&response->list, &info->empty_packet_queue); in put_empty_packet()
1082 info->count_empty_packet_queue++; in put_empty_packet()
1083 spin_unlock(&info->empty_packet_queue_lock); in put_empty_packet()
1085 queue_work(info->workqueue, &info->post_send_credits_work); in put_empty_packet()
1089 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
1103 spin_lock(&info->reassembly_queue_lock); in enqueue_reassembly()
1104 list_add_tail(&response->list, &info->reassembly_queue); in enqueue_reassembly()
1105 info->reassembly_queue_length++; in enqueue_reassembly()
1113 info->reassembly_data_length += data_length; in enqueue_reassembly()
1114 spin_unlock(&info->reassembly_queue_lock); in enqueue_reassembly()
1115 info->count_reassembly_queue++; in enqueue_reassembly()
1116 info->count_enqueue_reassembly_queue++; in enqueue_reassembly()
1128 if (!list_empty(&info->reassembly_queue)) { in _get_first_reassembly()
1130 &info->reassembly_queue, in _get_first_reassembly()
1142 spin_lock_irqsave(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1143 if (!list_empty(&info->empty_packet_queue)) { in get_empty_queue_buffer()
1145 &info->empty_packet_queue, in get_empty_queue_buffer()
1147 list_del(&ret->list); in get_empty_queue_buffer()
1148 info->count_empty_packet_queue--; in get_empty_queue_buffer()
1150 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1158 * pre-allocated in advance.
1166 spin_lock_irqsave(&info->receive_queue_lock, flags); in get_receive_buffer()
1167 if (!list_empty(&info->receive_queue)) { in get_receive_buffer()
1169 &info->receive_queue, in get_receive_buffer()
1171 list_del(&ret->list); in get_receive_buffer()
1172 info->count_receive_queue--; in get_receive_buffer()
1173 info->count_get_receive_buffer++; in get_receive_buffer()
1175 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in get_receive_buffer()
1191 ib_dma_unmap_single(info->id->device, response->sge.addr, in put_receive_buffer()
1192 response->sge.length, DMA_FROM_DEVICE); in put_receive_buffer()
1194 spin_lock_irqsave(&info->receive_queue_lock, flags); in put_receive_buffer()
1195 list_add_tail(&response->list, &info->receive_queue); in put_receive_buffer()
1196 info->count_receive_queue++; in put_receive_buffer()
1197 info->count_put_receive_buffer++; in put_receive_buffer()
1198 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in put_receive_buffer()
1200 queue_work(info->workqueue, &info->post_send_credits_work); in put_receive_buffer()
1209 INIT_LIST_HEAD(&info->reassembly_queue); in allocate_receive_buffers()
1210 spin_lock_init(&info->reassembly_queue_lock); in allocate_receive_buffers()
1211 info->reassembly_data_length = 0; in allocate_receive_buffers()
1212 info->reassembly_queue_length = 0; in allocate_receive_buffers()
1214 INIT_LIST_HEAD(&info->receive_queue); in allocate_receive_buffers()
1215 spin_lock_init(&info->receive_queue_lock); in allocate_receive_buffers()
1216 info->count_receive_queue = 0; in allocate_receive_buffers()
1218 INIT_LIST_HEAD(&info->empty_packet_queue); in allocate_receive_buffers()
1219 spin_lock_init(&info->empty_packet_queue_lock); in allocate_receive_buffers()
1220 info->count_empty_packet_queue = 0; in allocate_receive_buffers()
1222 init_waitqueue_head(&info->wait_receive_queues); in allocate_receive_buffers()
1225 response = mempool_alloc(info->response_mempool, GFP_KERNEL); in allocate_receive_buffers()
1229 response->info = info; in allocate_receive_buffers()
1230 list_add_tail(&response->list, &info->receive_queue); in allocate_receive_buffers()
1231 info->count_receive_queue++; in allocate_receive_buffers()
1237 while (!list_empty(&info->receive_queue)) { in allocate_receive_buffers()
1239 &info->receive_queue, in allocate_receive_buffers()
1241 list_del(&response->list); in allocate_receive_buffers()
1242 info->count_receive_queue--; in allocate_receive_buffers()
1244 mempool_free(response, info->response_mempool); in allocate_receive_buffers()
1246 return -ENOMEM; in allocate_receive_buffers()
1254 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1257 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1260 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
1267 if (info->keep_alive_requested != KEEP_ALIVE_NONE) { in idle_connection_timer()
1269 "error status info->keep_alive_requested=%d\n", in idle_connection_timer()
1270 info->keep_alive_requested); in idle_connection_timer()
1279 queue_delayed_work(info->workqueue, &info->idle_timer_work, in idle_connection_timer()
1280 info->keep_alive_interval*HZ); in idle_connection_timer()
1284 * Destroy the transport and related RDMA and memory resources
1290 struct smbd_connection *info = server->smbd_conn; in smbd_destroy()
1300 if (info->transport_status != SMBD_DISCONNECTED) { in smbd_destroy()
1301 rdma_disconnect(server->smbd_conn->id); in smbd_destroy()
1304 info->disconn_wait, in smbd_destroy()
1305 info->transport_status == SMBD_DISCONNECTED); in smbd_destroy()
1309 ib_drain_qp(info->id->qp); in smbd_destroy()
1310 rdma_destroy_qp(info->id); in smbd_destroy()
1313 cancel_delayed_work_sync(&info->idle_timer_work); in smbd_destroy()
1316 wait_event(info->wait_send_pending, in smbd_destroy()
1317 atomic_read(&info->send_pending) == 0); in smbd_destroy()
1322 spin_lock_irqsave(&info->reassembly_queue_lock, flags); in smbd_destroy()
1325 list_del(&response->list); in smbd_destroy()
1327 &info->reassembly_queue_lock, flags); in smbd_destroy()
1331 &info->reassembly_queue_lock, flags); in smbd_destroy()
1333 info->reassembly_data_length = 0; in smbd_destroy()
1336 wait_event(info->wait_receive_queues, in smbd_destroy()
1337 info->count_receive_queue + info->count_empty_packet_queue in smbd_destroy()
1338 == info->receive_credit_max); in smbd_destroy()
1342 * For performance reasons, memory registration and deregistration in smbd_destroy()
1344 * blocked on transport srv_mutex while holding memory registration. in smbd_destroy()
1346 * path when sending data, and then release memory registrations. in smbd_destroy()
1349 wake_up_interruptible_all(&info->wait_mr); in smbd_destroy()
1350 while (atomic_read(&info->mr_used_count)) { in smbd_destroy()
1357 ib_free_cq(info->send_cq); in smbd_destroy()
1358 ib_free_cq(info->recv_cq); in smbd_destroy()
1359 ib_dealloc_pd(info->pd); in smbd_destroy()
1360 rdma_destroy_id(info->id); in smbd_destroy()
1363 mempool_destroy(info->request_mempool); in smbd_destroy()
1364 kmem_cache_destroy(info->request_cache); in smbd_destroy()
1366 mempool_destroy(info->response_mempool); in smbd_destroy()
1367 kmem_cache_destroy(info->response_cache); in smbd_destroy()
1369 info->transport_status = SMBD_DESTROYED; in smbd_destroy()
1371 destroy_workqueue(info->workqueue); in smbd_destroy()
1374 server->smbd_conn = NULL; in smbd_destroy()
1385 if (!server->smbd_conn) { in smbd_reconnect()
1394 if (server->smbd_conn->transport_status == SMBD_CONNECTED) { in smbd_reconnect()
1401 server->smbd_conn = smbd_get_connection( in smbd_reconnect()
1402 server, (struct sockaddr *) &server->dstaddr); in smbd_reconnect()
1404 if (server->smbd_conn) { in smbd_reconnect()
1405 cifs_dbg(VFS, "RDMA transport re-established\n"); in smbd_reconnect()
1406 trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr); in smbd_reconnect()
1409 trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr); in smbd_reconnect()
1410 return -ENOENT; in smbd_reconnect()
1416 destroy_workqueue(info->workqueue); in destroy_caches_and_workqueue()
1417 mempool_destroy(info->response_mempool); in destroy_caches_and_workqueue()
1418 kmem_cache_destroy(info->response_cache); in destroy_caches_and_workqueue()
1419 mempool_destroy(info->request_mempool); in destroy_caches_and_workqueue()
1420 kmem_cache_destroy(info->request_cache); in destroy_caches_and_workqueue()
1430 info->request_cache = in allocate_caches_and_workqueue()
1436 if (!info->request_cache) in allocate_caches_and_workqueue()
1437 return -ENOMEM; in allocate_caches_and_workqueue()
1439 info->request_mempool = in allocate_caches_and_workqueue()
1440 mempool_create(info->send_credit_target, mempool_alloc_slab, in allocate_caches_and_workqueue()
1441 mempool_free_slab, info->request_cache); in allocate_caches_and_workqueue()
1442 if (!info->request_mempool) in allocate_caches_and_workqueue()
1446 info->response_cache = in allocate_caches_and_workqueue()
1450 info->max_receive_size, in allocate_caches_and_workqueue()
1452 if (!info->response_cache) in allocate_caches_and_workqueue()
1455 info->response_mempool = in allocate_caches_and_workqueue()
1456 mempool_create(info->receive_credit_max, mempool_alloc_slab, in allocate_caches_and_workqueue()
1457 mempool_free_slab, info->response_cache); in allocate_caches_and_workqueue()
1458 if (!info->response_mempool) in allocate_caches_and_workqueue()
1462 info->workqueue = create_workqueue(name); in allocate_caches_and_workqueue()
1463 if (!info->workqueue) in allocate_caches_and_workqueue()
1466 rc = allocate_receive_buffers(info, info->receive_credit_max); in allocate_caches_and_workqueue()
1475 destroy_workqueue(info->workqueue); in allocate_caches_and_workqueue()
1477 mempool_destroy(info->response_mempool); in allocate_caches_and_workqueue()
1479 kmem_cache_destroy(info->response_cache); in allocate_caches_and_workqueue()
1481 mempool_destroy(info->request_mempool); in allocate_caches_and_workqueue()
1483 kmem_cache_destroy(info->request_cache); in allocate_caches_and_workqueue()
1484 return -ENOMEM; in allocate_caches_and_workqueue()
1503 info->transport_status = SMBD_CONNECTING; in _smbd_get_connection()
1510 if (smbd_send_credit_target > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1511 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1514 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1515 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1519 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1520 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1523 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1524 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1528 info->receive_credit_max = smbd_receive_credit_max; in _smbd_get_connection()
1529 info->send_credit_target = smbd_send_credit_target; in _smbd_get_connection()
1530 info->max_send_size = smbd_max_send_size; in _smbd_get_connection()
1531 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; in _smbd_get_connection()
1532 info->max_receive_size = smbd_max_receive_size; in _smbd_get_connection()
1533 info->keep_alive_interval = smbd_keep_alive_interval; in _smbd_get_connection()
1535 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || in _smbd_get_connection()
1536 info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { in _smbd_get_connection()
1540 info->id->device->name, in _smbd_get_connection()
1541 info->id->device->attrs.max_send_sge, in _smbd_get_connection()
1542 info->id->device->attrs.max_recv_sge); in _smbd_get_connection()
1546 info->send_cq = NULL; in _smbd_get_connection()
1547 info->recv_cq = NULL; in _smbd_get_connection()
1548 info->send_cq = in _smbd_get_connection()
1549 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1550 info->send_credit_target, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1551 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1552 info->send_cq = NULL; in _smbd_get_connection()
1556 info->recv_cq = in _smbd_get_connection()
1557 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1558 info->receive_credit_max, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1559 if (IS_ERR(info->recv_cq)) { in _smbd_get_connection()
1560 info->recv_cq = NULL; in _smbd_get_connection()
1567 qp_attr.cap.max_send_wr = info->send_credit_target; in _smbd_get_connection()
1568 qp_attr.cap.max_recv_wr = info->receive_credit_max; in _smbd_get_connection()
1574 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1575 qp_attr.recv_cq = info->recv_cq; in _smbd_get_connection()
1578 rc = rdma_create_qp(info->id, info->pd, &qp_attr); in _smbd_get_connection()
1588 info->id->device->attrs.max_qp_rd_atom in _smbd_get_connection()
1590 info->id->device->attrs.max_qp_rd_atom : in _smbd_get_connection()
1592 info->responder_resources = conn_param.responder_resources; in _smbd_get_connection()
1594 info->responder_resources); in _smbd_get_connection()
1597 info->id->device->ops.get_port_immutable( in _smbd_get_connection()
1598 info->id->device, info->id->port_num, &port_immutable); in _smbd_get_connection()
1600 ird_ord_hdr[0] = info->responder_resources; in _smbd_get_connection()
1614 &addr_in->sin_addr, port); in _smbd_get_connection()
1616 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1617 init_waitqueue_head(&info->disconn_wait); in _smbd_get_connection()
1618 init_waitqueue_head(&info->wait_reassembly_queue); in _smbd_get_connection()
1619 rc = rdma_connect(info->id, &conn_param); in _smbd_get_connection()
1626 info->conn_wait, info->transport_status != SMBD_CONNECTING); in _smbd_get_connection()
1628 if (info->transport_status != SMBD_CONNECTED) { in _smbd_get_connection()
1641 init_waitqueue_head(&info->wait_send_queue); in _smbd_get_connection()
1642 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); in _smbd_get_connection()
1643 queue_delayed_work(info->workqueue, &info->idle_timer_work, in _smbd_get_connection()
1644 info->keep_alive_interval*HZ); in _smbd_get_connection()
1646 init_waitqueue_head(&info->wait_send_pending); in _smbd_get_connection()
1647 atomic_set(&info->send_pending, 0); in _smbd_get_connection()
1649 init_waitqueue_head(&info->wait_post_send); in _smbd_get_connection()
1651 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1652 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); in _smbd_get_connection()
1653 info->new_credits_offered = 0; in _smbd_get_connection()
1654 spin_lock_init(&info->lock_new_credits_offered); in _smbd_get_connection()
1664 log_rdma_mr(ERR, "memory registration allocation failed\n"); in _smbd_get_connection()
1672 server->smbd_conn = info; in _smbd_get_connection()
1677 cancel_delayed_work_sync(&info->idle_timer_work); in _smbd_get_connection()
1679 info->transport_status = SMBD_NEGOTIATE_FAILED; in _smbd_get_connection()
1680 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1681 rdma_disconnect(info->id); in _smbd_get_connection()
1682 wait_event(info->conn_wait, in _smbd_get_connection()
1683 info->transport_status == SMBD_DISCONNECTED); in _smbd_get_connection()
1687 rdma_destroy_qp(info->id); in _smbd_get_connection()
1691 if (info->send_cq) in _smbd_get_connection()
1692 ib_free_cq(info->send_cq); in _smbd_get_connection()
1693 if (info->recv_cq) in _smbd_get_connection()
1694 ib_free_cq(info->recv_cq); in _smbd_get_connection()
1697 ib_dealloc_pd(info->pd); in _smbd_get_connection()
1698 rdma_destroy_id(info->id); in _smbd_get_connection()
1746 * No need to hold the reassembly queue lock all the time as we are in smbd_recv_buf()
1750 log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, in smbd_recv_buf()
1751 info->reassembly_data_length); in smbd_recv_buf()
1752 if (info->reassembly_data_length >= size) { in smbd_recv_buf()
1764 queue_length = info->reassembly_queue_length; in smbd_recv_buf()
1767 offset = info->first_entry_offset; in smbd_recv_buf()
1771 data_length = le32_to_cpu(data_transfer->data_length); in smbd_recv_buf()
1774 data_transfer->remaining_data_length); in smbd_recv_buf()
1775 data_offset = le32_to_cpu(data_transfer->data_offset); in smbd_recv_buf()
1785 if (response->first_segment && size == 4) { in smbd_recv_buf()
1790 response->first_segment = false; in smbd_recv_buf()
1796 to_copy = min_t(int, data_length - offset, to_read); in smbd_recv_buf()
1803 if (to_copy == data_length - offset) { in smbd_recv_buf()
1804 queue_length--; in smbd_recv_buf()
1806 * No need to lock if we are not at the in smbd_recv_buf()
1810 list_del(&response->list); in smbd_recv_buf()
1813 &info->reassembly_queue_lock); in smbd_recv_buf()
1814 list_del(&response->list); in smbd_recv_buf()
1816 &info->reassembly_queue_lock); in smbd_recv_buf()
1819 info->count_reassembly_queue--; in smbd_recv_buf()
1820 info->count_dequeue_reassembly_queue++; in smbd_recv_buf()
1827 to_read -= to_copy; in smbd_recv_buf()
1830 …log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to… in smbd_recv_buf()
1831 to_copy, data_length - offset, in smbd_recv_buf()
1835 spin_lock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1836 info->reassembly_data_length -= data_read; in smbd_recv_buf()
1837 info->reassembly_queue_length -= queue_removed; in smbd_recv_buf()
1838 spin_unlock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1840 info->first_entry_offset = offset; in smbd_recv_buf()
1842 data_read, info->reassembly_data_length, in smbd_recv_buf()
1843 info->first_entry_offset); in smbd_recv_buf()
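/*
 * Read-path summary: data is copied out of queued responses starting at
 * first_entry_offset; a response that is fully consumed is unlinked
 * (taking reassembly_queue_lock only when it is the last entry, to avoid
 * racing with a concurrent enqueue at the tail) and recycled via
 * put_receive_buffer(), and the reassembly counters are then reduced
 * under the lock before first_entry_offset is saved for the next read.
 */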
1850 info->wait_reassembly_queue, in smbd_recv_buf()
1851 info->reassembly_data_length >= size || in smbd_recv_buf()
1852 info->transport_status != SMBD_CONNECTED); in smbd_recv_buf()
1857 if (info->transport_status != SMBD_CONNECTED) { in smbd_recv_buf()
1859 return -ECONNABORTED; in smbd_recv_buf()
1881 info->wait_reassembly_queue, in smbd_recv_page()
1882 info->reassembly_data_length >= to_read || in smbd_recv_page()
1883 info->transport_status != SMBD_CONNECTED); in smbd_recv_page()
1912 if (iov_iter_rw(&msg->msg_iter) == WRITE) { in smbd_recv()
1915 iov_iter_rw(&msg->msg_iter)); in smbd_recv()
1916 rc = -EINVAL; in smbd_recv()
1920 switch (iov_iter_type(&msg->msg_iter)) { in smbd_recv()
1922 buf = msg->msg_iter.kvec->iov_base; in smbd_recv()
1923 to_read = msg->msg_iter.kvec->iov_len; in smbd_recv()
1928 page = msg->msg_iter.bvec->bv_page; in smbd_recv()
1929 page_offset = msg->msg_iter.bvec->bv_offset; in smbd_recv()
1930 to_read = msg->msg_iter.bvec->bv_len; in smbd_recv()
1937 iov_iter_type(&msg->msg_iter)); in smbd_recv()
1938 rc = -EINVAL; in smbd_recv()
1944 msg->msg_iter.count = 0; in smbd_recv()
1957 struct smbd_connection *info = server->smbd_conn; in smbd_send()
1963 if (info->transport_status != SMBD_CONNECTED) in smbd_send()
1964 return -EAGAIN; in smbd_send()
1975 if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { in smbd_send()
1978 remaining_data_length, info->max_fragmented_send_size); in smbd_send()
1979 return -EINVAL; in smbd_send()
1991 for (i = 0; i < rqst->rq_nvec; i++) in smbd_send()
1992 dump_smb(rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); in smbd_send()
1994 log_write(INFO, "RDMA-WR[%u] nvec=%d len=%u iter=%zu rqlen=%lu\n", in smbd_send()
1995 rqst_idx, rqst->rq_nvec, remaining_data_length, in smbd_send()
1996 iov_iter_count(&rqst->rq_iter), smb_rqst_len(server, rqst)); in smbd_send()
2000 for (i = 0; i < rqst->rq_nvec; i++) in smbd_send()
2001 klen += rqst->rq_iov[i].iov_len; in smbd_send()
2002 iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen); in smbd_send()
2008 if (iov_iter_count(&rqst->rq_iter) > 0) { in smbd_send()
2010 rc = smbd_post_send_iter(info, &rqst->rq_iter, in smbd_send()
2025 wait_event(info->wait_send_pending, in smbd_send()
2026 atomic_read(&info->send_pending) == 0); in smbd_send()
2031 static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) in register_mr_done() argument
2036 if (wc->status) { in register_mr_done()
2037 log_rdma_mr(ERR, "status=%d\n", wc->status); in register_mr_done()
2038 cqe = wc->wr_cqe; in register_mr_done()
2040 smbd_disconnect_rdma_connection(mr->conn); in register_mr_done()
2049 * There is one workqueue that recovers MRs; there is no need to lock as the
2060 list_for_each_entry(smbdirect_mr, &info->mr_list, list) { in smbd_mr_recovery_work()
2061 if (smbdirect_mr->state == MR_ERROR) { in smbd_mr_recovery_work()
2064 rc = ib_dereg_mr(smbdirect_mr->mr); in smbd_mr_recovery_work()
2073 smbdirect_mr->mr = ib_alloc_mr( in smbd_mr_recovery_work()
2074 info->pd, info->mr_type, in smbd_mr_recovery_work()
2075 info->max_frmr_depth); in smbd_mr_recovery_work()
2076 if (IS_ERR(smbdirect_mr->mr)) { in smbd_mr_recovery_work()
2078 info->mr_type, in smbd_mr_recovery_work()
2079 info->max_frmr_depth); in smbd_mr_recovery_work()
2087 smbdirect_mr->state = MR_READY; in smbd_mr_recovery_work()
2089 /* smbdirect_mr->state is updated by this function in smbd_mr_recovery_work()
2092 * implies a memory barrier and guarantees this in smbd_mr_recovery_work()
2096 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_mr_recovery_work()
2097 wake_up_interruptible(&info->wait_mr); in smbd_mr_recovery_work()
2105 cancel_work_sync(&info->mr_recovery_work); in destroy_mr_list()
2106 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { in destroy_mr_list()
2107 if (mr->state == MR_INVALIDATED) in destroy_mr_list()
2108 ib_dma_unmap_sg(info->id->device, mr->sgt.sgl, in destroy_mr_list()
2109 mr->sgt.nents, mr->dir); in destroy_mr_list()
2110 ib_dereg_mr(mr->mr); in destroy_mr_list()
2111 kfree(mr->sgt.sgl); in destroy_mr_list()
2128 INIT_LIST_HEAD(&info->mr_list); in allocate_mr_list()
2129 init_waitqueue_head(&info->wait_mr); in allocate_mr_list()
2130 spin_lock_init(&info->mr_list_lock); in allocate_mr_list()
2131 atomic_set(&info->mr_ready_count, 0); in allocate_mr_list()
2132 atomic_set(&info->mr_used_count, 0); in allocate_mr_list()
2133 init_waitqueue_head(&info->wait_for_mr_cleanup); in allocate_mr_list()
2134 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2136 for (i = 0; i < info->responder_resources * 2; i++) { in allocate_mr_list()
2140 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, in allocate_mr_list()
2141 info->max_frmr_depth); in allocate_mr_list()
2142 if (IS_ERR(smbdirect_mr->mr)) { in allocate_mr_list()
2144 info->mr_type, info->max_frmr_depth); in allocate_mr_list()
2147 smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth, in allocate_mr_list()
2150 if (!smbdirect_mr->sgt.sgl) { in allocate_mr_list()
2152 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2155 smbdirect_mr->state = MR_READY; in allocate_mr_list()
2156 smbdirect_mr->conn = info; in allocate_mr_list()
2158 list_add_tail(&smbdirect_mr->list, &info->mr_list); in allocate_mr_list()
2159 atomic_inc(&info->mr_ready_count); in allocate_mr_list()
2166 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { in allocate_mr_list()
2167 list_del(&smbdirect_mr->list); in allocate_mr_list()
2168 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2169 kfree(smbdirect_mr->sgt.sgl); in allocate_mr_list()
2172 return -ENOMEM; in allocate_mr_list()
2188 rc = wait_event_interruptible(info->wait_mr, in get_mr()
2189 atomic_read(&info->mr_ready_count) || in get_mr()
2190 info->transport_status != SMBD_CONNECTED); in get_mr()
2196 if (info->transport_status != SMBD_CONNECTED) { in get_mr()
2197 log_rdma_mr(ERR, "info->transport_status=%x\n", in get_mr()
2198 info->transport_status); in get_mr()
2202 spin_lock(&info->mr_list_lock); in get_mr()
2203 list_for_each_entry(ret, &info->mr_list, list) { in get_mr()
2204 if (ret->state == MR_READY) { in get_mr()
2205 ret->state = MR_REGISTERED; in get_mr()
2206 spin_unlock(&info->mr_list_lock); in get_mr()
2207 atomic_dec(&info->mr_ready_count); in get_mr()
2208 atomic_inc(&info->mr_used_count); in get_mr()
2213 spin_unlock(&info->mr_list_lock); in get_mr()
2231 memset(sgt->sgl, 0, max_sg * sizeof(struct scatterlist)); in smbd_iter_to_mr()
2235 if (sgt->nents > 0) in smbd_iter_to_mr()
2236 sg_mark_end(&sgt->sgl[sgt->nents - 1]); in smbd_iter_to_mr()
2241 * Register memory for RDMA read/write
2242 * iter: the buffer to register memory with
2256 num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1); in smbd_register_mr()
2257 if (num_pages > info->max_frmr_depth) { in smbd_register_mr()
2259 num_pages, info->max_frmr_depth); in smbd_register_mr()
2271 smbdirect_mr->dir = dir; in smbd_register_mr()
2272 smbdirect_mr->need_invalidate = need_invalidate; in smbd_register_mr()
2273 smbdirect_mr->sgt.nents = 0; in smbd_register_mr()
2274 smbdirect_mr->sgt.orig_nents = 0; in smbd_register_mr()
2277 num_pages, iov_iter_count(iter), info->max_frmr_depth); in smbd_register_mr()
2278 smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth); in smbd_register_mr()
2280 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2281 smbdirect_mr->sgt.nents, dir); in smbd_register_mr()
2288 rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2289 smbdirect_mr->sgt.nents, NULL, PAGE_SIZE); in smbd_register_mr()
2290 if (rc != smbdirect_mr->sgt.nents) { in smbd_register_mr()
2293 rc, smbdirect_mr->sgt.nents); in smbd_register_mr()
2297 ib_update_fast_reg_key(smbdirect_mr->mr, in smbd_register_mr()
2298 ib_inc_rkey(smbdirect_mr->mr->rkey)); in smbd_register_mr()
2299 reg_wr = &smbdirect_mr->wr; in smbd_register_mr()
2300 reg_wr->wr.opcode = IB_WR_REG_MR; in smbd_register_mr()
2301 smbdirect_mr->cqe.done = register_mr_done; in smbd_register_mr()
2302 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; in smbd_register_mr()
2303 reg_wr->wr.num_sge = 0; in smbd_register_mr()
2304 reg_wr->wr.send_flags = IB_SEND_SIGNALED; in smbd_register_mr()
2305 reg_wr->mr = smbdirect_mr->mr; in smbd_register_mr()
2306 reg_wr->key = smbdirect_mr->mr->rkey; in smbd_register_mr()
2307 reg_wr->access = writing ? in smbd_register_mr()
2312 * There is no need to wait for completion on ib_post_send in smbd_register_mr()
2316 rc = ib_post_send(info->id->qp, ®_wr->wr, NULL); in smbd_register_mr()
2320 log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n", in smbd_register_mr()
2321 rc, reg_wr->key); in smbd_register_mr()
2325 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2326 smbdirect_mr->sgt.nents, smbdirect_mr->dir); in smbd_register_mr()
2329 smbdirect_mr->state = MR_ERROR; in smbd_register_mr()
2330 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_register_mr()
2331 wake_up(&info->wait_for_mr_cleanup); in smbd_register_mr()
2338 static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) in local_inv_done() argument
2343 cqe = wc->wr_cqe; in local_inv_done()
2345 smbdirect_mr->state = MR_INVALIDATED; in local_inv_done()
2346 if (wc->status != IB_WC_SUCCESS) { in local_inv_done()
2347 log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); in local_inv_done()
2348 smbdirect_mr->state = MR_ERROR; in local_inv_done()
2350 complete(&smbdirect_mr->invalidate_done); in local_inv_done()
2362 struct smbd_connection *info = smbdirect_mr->conn; in smbd_deregister_mr()
2365 if (smbdirect_mr->need_invalidate) { in smbd_deregister_mr()
2367 wr = &smbdirect_mr->inv_wr; in smbd_deregister_mr()
2368 wr->opcode = IB_WR_LOCAL_INV; in smbd_deregister_mr()
2369 smbdirect_mr->cqe.done = local_inv_done; in smbd_deregister_mr()
2370 wr->wr_cqe = &smbdirect_mr->cqe; in smbd_deregister_mr()
2371 wr->num_sge = 0; in smbd_deregister_mr()
2372 wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; in smbd_deregister_mr()
2373 wr->send_flags = IB_SEND_SIGNALED; in smbd_deregister_mr()
2375 init_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2376 rc = ib_post_send(info->id->qp, wr, NULL); in smbd_deregister_mr()
2382 wait_for_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2383 smbdirect_mr->need_invalidate = false; in smbd_deregister_mr()
2389 smbdirect_mr->state = MR_INVALIDATED; in smbd_deregister_mr()
2391 if (smbdirect_mr->state == MR_INVALIDATED) { in smbd_deregister_mr()
2393 info->id->device, smbdirect_mr->sgt.sgl, in smbd_deregister_mr()
2394 smbdirect_mr->sgt.nents, in smbd_deregister_mr()
2395 smbdirect_mr->dir); in smbd_deregister_mr()
2396 smbdirect_mr->state = MR_READY; in smbd_deregister_mr()
2397 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_deregister_mr()
2398 wake_up_interruptible(&info->wait_mr); in smbd_deregister_mr()
2404 queue_work(info->workqueue, &info->mr_recovery_work); in smbd_deregister_mr()
2407 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_deregister_mr()
2408 wake_up(&info->wait_for_mr_cleanup); in smbd_deregister_mr()
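/*
 * MR recycle summary: when need_invalidate is set, a LOCAL_INV work
 * request is posted and its completion awaited (local_inv_done() moves
 * the MR to MR_INVALIDATED, or MR_ERROR on failure); an invalidated MR
 * has its sg list DMA-unmapped and goes back to MR_READY, bumping
 * mr_ready_count and waking get_mr() waiters, while an MR in any other
 * state is handed to smbd_mr_recovery_work(), which de-registers and
 * re-allocates those marked MR_ERROR.
 */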
2416 struct ib_sge *sge = &rdma->sge[rdma->nr_sge]; in smb_set_sge()
2419 addr = ib_dma_map_page(rdma->device, lowest_page, in smb_set_sge()
2420 off, len, rdma->direction); in smb_set_sge()
2421 if (ib_dma_mapping_error(rdma->device, addr)) in smb_set_sge()
2424 sge->addr = addr; in smb_set_sge()
2425 sge->length = len; in smb_set_sge()
2426 sge->lkey = rdma->local_dma_lkey; in smb_set_sge()
2427 rdma->nr_sge++; in smb_set_sge()
2432 * Extract page fragments from a BVEC-class iterator and add them to an RDMA
2439 const struct bio_vec *bv = iter->bvec; in smb_extract_bvec_to_rdma()
2440 unsigned long start = iter->iov_offset; in smb_extract_bvec_to_rdma()
2444 for (i = 0; i < iter->nr_segs; i++) { in smb_extract_bvec_to_rdma()
2449 start -= len; in smb_extract_bvec_to_rdma()
2453 len = min_t(size_t, maxsize, len - start); in smb_extract_bvec_to_rdma()
2457 return -EIO; in smb_extract_bvec_to_rdma()
2460 maxsize -= len; in smb_extract_bvec_to_rdma()
2461 if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) in smb_extract_bvec_to_rdma()
2470 * Extract fragments from a KVEC-class iterator and add them to an RDMA list.
2478 const struct kvec *kv = iter->kvec; in smb_extract_kvec_to_rdma()
2479 unsigned long start = iter->iov_offset; in smb_extract_kvec_to_rdma()
2483 for (i = 0; i < iter->nr_segs; i++) { in smb_extract_kvec_to_rdma()
2490 start -= len; in smb_extract_kvec_to_rdma()
2496 len = min_t(size_t, maxsize, len - start); in smb_extract_kvec_to_rdma()
2499 maxsize -= len; in smb_extract_kvec_to_rdma()
2501 seg = min_t(size_t, len, PAGE_SIZE - off); in smb_extract_kvec_to_rdma()
2509 return -EIO; in smb_extract_kvec_to_rdma()
2512 len -= seg; in smb_extract_kvec_to_rdma()
2515 } while (len > 0 && rdma->nr_sge < rdma->max_sge); in smb_extract_kvec_to_rdma()
2517 if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) in smb_extract_kvec_to_rdma()
2526 * Extract folio fragments from an XARRAY-class iterator and add them to an
2533 struct xarray *xa = iter->xarray; in smb_extract_xarray_to_rdma()
2535 loff_t start = iter->xarray_start + iter->iov_offset; in smb_extract_xarray_to_rdma()
2552 len = min_t(size_t, maxsize, folio_size(folio) - off); in smb_extract_xarray_to_rdma()
2556 return -EIO; in smb_extract_xarray_to_rdma()
2559 maxsize -= len; in smb_extract_xarray_to_rdma()
2561 if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) in smb_extract_xarray_to_rdma()
2576 * IOVEC/UBUF-type iterator is to be used, it should be converted to a
2577 * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some
2584 int before = rdma->nr_sge; in smb_extract_iter_to_rdma()
2598 return -EIO; in smb_extract_iter_to_rdma()
2604 while (rdma->nr_sge > before) { in smb_extract_iter_to_rdma()
2605 struct ib_sge *sge = &rdma->sge[--rdma->nr_sge]; in smb_extract_iter_to_rdma()
2607 ib_dma_unmap_single(rdma->device, sge->addr, sge->length, in smb_extract_iter_to_rdma()
2608 rdma->direction); in smb_extract_iter_to_rdma()
2609 sge->addr = 0; in smb_extract_iter_to_rdma()