Lines matching refs: t — references to the transport pointer t in ksmbd's SMB Direct (RDMA) transport code.
Each entry gives the source line number, the matching line, the enclosing function, and how t is referenced (argument, member, or local).
160 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport)) argument
197 struct smb_direct_transport *t; member
226 static int smb_direct_post_send_data(struct smb_direct_transport *t,
232 smb_trans_direct_transfort(struct ksmbd_transport *t) in smb_trans_direct_transfort() argument
234 return container_of(t, struct smb_direct_transport, transport); in smb_trans_direct_transfort()
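The KSMBD_TRANS() macro at line 160 and smb_trans_direct_transfort() at lines 232-234 convert in both directions between the embedded struct ksmbd_transport and its enclosing struct smb_direct_transport. A minimal user-space sketch of that embedding pattern, with a local container_of() and a simplified, assumed layout (not the kernel's actual fields):

#include <stddef.h>
#include <stdio.h>

/* user-space stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct ksmbd_transport { int dummy; };           /* simplified */

struct smb_direct_transport {
    struct ksmbd_transport transport;            /* embedded base, what KSMBD_TRANS(t) points at */
    int status;                                  /* transport-private state */
};

/* the downcast smb_trans_direct_transfort() performs */
static struct smb_direct_transport *
to_smb_direct(struct ksmbd_transport *t)
{
    return container_of(t, struct smb_direct_transport, transport);
}

int main(void)
{
    struct smb_direct_transport st = { .status = 42 };
    struct ksmbd_transport *base = &st.transport;   /* the KSMBD_TRANS(t) direction */

    printf("status via downcast: %d\n", to_smb_direct(base)->status);
    return 0;
}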
251 smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) in get_free_recvmsg() argument
255 spin_lock(&t->recvmsg_queue_lock); in get_free_recvmsg()
256 if (!list_empty(&t->recvmsg_queue)) { in get_free_recvmsg()
257 recvmsg = list_first_entry(&t->recvmsg_queue, in get_free_recvmsg()
262 spin_unlock(&t->recvmsg_queue_lock); in get_free_recvmsg()
266 static void put_recvmsg(struct smb_direct_transport *t, in put_recvmsg() argument
269 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_recvmsg()
272 spin_lock(&t->recvmsg_queue_lock); in put_recvmsg()
273 list_add(&recvmsg->list, &t->recvmsg_queue); in put_recvmsg()
274 spin_unlock(&t->recvmsg_queue_lock); in put_recvmsg()
278 smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) in get_empty_recvmsg() argument
282 spin_lock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
283 if (!list_empty(&t->empty_recvmsg_queue)) { in get_empty_recvmsg()
284 recvmsg = list_first_entry(&t->empty_recvmsg_queue, in get_empty_recvmsg()
288 spin_unlock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
292 static void put_empty_recvmsg(struct smb_direct_transport *t, in put_empty_recvmsg() argument
295 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_empty_recvmsg()
298 spin_lock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
299 list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); in put_empty_recvmsg()
300 spin_unlock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
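The recvmsg helpers at lines 251-300 are one pattern twice over: a spinlock-protected linked list used as a buffer free list, with a get that pops the head and a put that links the buffer back. A user-space analogue with a pthread mutex standing in for the spinlock and the kernel list_head handling simplified away (the kernel put_* variants also DMA-unmap the buffer first):

#include <pthread.h>
#include <stdio.h>

struct recvmsg {
    struct recvmsg *next;
    int id;
};

struct recv_pool {
    pthread_mutex_t lock;     /* recvmsg_queue_lock / empty_recvmsg_queue_lock */
    struct recvmsg *head;     /* recvmsg_queue / empty_recvmsg_queue */
};

/* get_free_recvmsg() / get_empty_recvmsg(): pop one entry, NULL when empty */
static struct recvmsg *pool_get(struct recv_pool *p)
{
    struct recvmsg *m = NULL;

    pthread_mutex_lock(&p->lock);
    if (p->head) {
        m = p->head;
        p->head = m->next;
    }
    pthread_mutex_unlock(&p->lock);
    return m;
}

/* put_recvmsg() / put_empty_recvmsg(): return the buffer to its list */
static void pool_put(struct recv_pool *p, struct recvmsg *m)
{
    pthread_mutex_lock(&p->lock);
    m->next = p->head;
    p->head = m;
    pthread_mutex_unlock(&p->lock);
}

int main(void)
{
    struct recvmsg a = { .id = 1 }, b = { .id = 2 };
    struct recv_pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };

    pool_put(&p, &a);
    pool_put(&p, &b);
    printf("got id %d\n", pool_get(&p)->id);
    return 0;
}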
303 static void enqueue_reassembly(struct smb_direct_transport *t, in enqueue_reassembly() argument
307 spin_lock(&t->reassembly_queue_lock); in enqueue_reassembly()
308 list_add_tail(&recvmsg->list, &t->reassembly_queue); in enqueue_reassembly()
309 t->reassembly_queue_length++; in enqueue_reassembly()
317 t->reassembly_data_length += data_length; in enqueue_reassembly()
318 spin_unlock(&t->reassembly_queue_lock); in enqueue_reassembly()
321 static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) in get_first_reassembly() argument
323 if (!list_empty(&t->reassembly_queue)) in get_first_reassembly()
324 return list_first_entry(&t->reassembly_queue, in get_first_reassembly()
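enqueue_reassembly() and get_first_reassembly() (lines 303-324) maintain a FIFO of received fragments plus a running total of queued payload bytes (reassembly_data_length). A user-space sketch of that accounting; using a C11 atomic with a release add for the byte count is an assumption of this sketch, standing in for however the kernel orders the update relative to lock-free readers:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct recvmsg { struct recvmsg *next; int data_length; };

struct reassembly {
    pthread_mutex_t lock;        /* reassembly_queue_lock */
    struct recvmsg *head, *tail; /* reassembly_queue */
    int queue_length;            /* reassembly_queue_length */
    atomic_int data_length;      /* reassembly_data_length */
};

static void enqueue_reassembly(struct reassembly *r, struct recvmsg *m,
                               int data_length)
{
    pthread_mutex_lock(&r->lock);
    m->next = NULL;
    if (r->tail)
        r->tail->next = m;
    else
        r->head = m;
    r->tail = m;
    r->queue_length++;
    /* publish the new total only after the entry is linked in */
    atomic_fetch_add_explicit(&r->data_length, data_length,
                              memory_order_release);
    pthread_mutex_unlock(&r->lock);
}

/* get_first_reassembly(): peek; the caller holds the lock, as free_transport() does */
static struct recvmsg *get_first_reassembly(struct reassembly *r)
{
    return r->head;
}

int main(void)
{
    struct reassembly r = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct recvmsg m = { .data_length = 512 };

    enqueue_reassembly(&r, &m, m.data_length);
    printf("queued %d bytes, first fragment %p\n",
           atomic_load(&r.data_length), (void *)get_first_reassembly(&r));
    return 0;
}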
332 struct smb_direct_transport *t = in smb_direct_disconnect_rdma_work() local
336 if (t->status == SMB_DIRECT_CS_CONNECTED) { in smb_direct_disconnect_rdma_work()
337 t->status = SMB_DIRECT_CS_DISCONNECTING; in smb_direct_disconnect_rdma_work()
338 rdma_disconnect(t->cm_id); in smb_direct_disconnect_rdma_work()
343 smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) in smb_direct_disconnect_rdma_connection() argument
345 if (t->status == SMB_DIRECT_CS_CONNECTED) in smb_direct_disconnect_rdma_connection()
346 queue_work(smb_direct_wq, &t->disconnect_work); in smb_direct_disconnect_rdma_connection()
351 struct smb_direct_transport *t = container_of(work, in smb_direct_send_immediate_work() local
354 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_send_immediate_work()
357 smb_direct_post_send_data(t, NULL, NULL, 0, 0); in smb_direct_send_immediate_work()
362 struct smb_direct_transport *t; in alloc_transport() local
365 t = kzalloc(sizeof(*t), GFP_KERNEL); in alloc_transport()
366 if (!t) in alloc_transport()
369 t->cm_id = cm_id; in alloc_transport()
370 cm_id->context = t; in alloc_transport()
372 t->status = SMB_DIRECT_CS_NEW; in alloc_transport()
373 init_waitqueue_head(&t->wait_status); in alloc_transport()
375 spin_lock_init(&t->reassembly_queue_lock); in alloc_transport()
376 INIT_LIST_HEAD(&t->reassembly_queue); in alloc_transport()
377 t->reassembly_data_length = 0; in alloc_transport()
378 t->reassembly_queue_length = 0; in alloc_transport()
379 init_waitqueue_head(&t->wait_reassembly_queue); in alloc_transport()
380 init_waitqueue_head(&t->wait_send_credits); in alloc_transport()
381 init_waitqueue_head(&t->wait_rw_credits); in alloc_transport()
383 spin_lock_init(&t->receive_credit_lock); in alloc_transport()
384 spin_lock_init(&t->recvmsg_queue_lock); in alloc_transport()
385 INIT_LIST_HEAD(&t->recvmsg_queue); in alloc_transport()
387 spin_lock_init(&t->empty_recvmsg_queue_lock); in alloc_transport()
388 INIT_LIST_HEAD(&t->empty_recvmsg_queue); in alloc_transport()
390 init_waitqueue_head(&t->wait_send_pending); in alloc_transport()
391 atomic_set(&t->send_pending, 0); in alloc_transport()
393 spin_lock_init(&t->lock_new_recv_credits); in alloc_transport()
395 INIT_DELAYED_WORK(&t->post_recv_credits_work, in alloc_transport()
397 INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); in alloc_transport()
398 INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); in alloc_transport()
403 conn->transport = KSMBD_TRANS(t); in alloc_transport()
404 KSMBD_TRANS(t)->conn = conn; in alloc_transport()
405 KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; in alloc_transport()
406 return t; in alloc_transport()
408 kfree(t); in alloc_transport()
412 static void free_transport(struct smb_direct_transport *t) in free_transport() argument
416 wake_up_interruptible(&t->wait_send_credits); in free_transport()
419 wait_event(t->wait_send_pending, in free_transport()
420 atomic_read(&t->send_pending) == 0); in free_transport()
422 cancel_work_sync(&t->disconnect_work); in free_transport()
423 cancel_delayed_work_sync(&t->post_recv_credits_work); in free_transport()
424 cancel_work_sync(&t->send_immediate_work); in free_transport()
426 if (t->qp) { in free_transport()
427 ib_drain_qp(t->qp); in free_transport()
428 ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); in free_transport()
429 ib_destroy_qp(t->qp); in free_transport()
434 spin_lock(&t->reassembly_queue_lock); in free_transport()
435 recvmsg = get_first_reassembly(t); in free_transport()
438 spin_unlock(&t->reassembly_queue_lock); in free_transport()
439 put_recvmsg(t, recvmsg); in free_transport()
441 spin_unlock(&t->reassembly_queue_lock); in free_transport()
444 t->reassembly_data_length = 0; in free_transport()
446 if (t->send_cq) in free_transport()
447 ib_free_cq(t->send_cq); in free_transport()
448 if (t->recv_cq) in free_transport()
449 ib_free_cq(t->recv_cq); in free_transport()
450 if (t->pd) in free_transport()
451 ib_dealloc_pd(t->pd); in free_transport()
452 if (t->cm_id) in free_transport()
453 rdma_destroy_id(t->cm_id); in free_transport()
455 smb_direct_destroy_pools(t); in free_transport()
456 ksmbd_conn_free(KSMBD_TRANS(t)->conn); in free_transport()
457 kfree(t); in free_transport()
461 *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) in smb_direct_alloc_sendmsg() argument
465 msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL); in smb_direct_alloc_sendmsg()
468 msg->transport = t; in smb_direct_alloc_sendmsg()
474 static void smb_direct_free_sendmsg(struct smb_direct_transport *t, in smb_direct_free_sendmsg() argument
480 ib_dma_unmap_single(t->cm_id->device, in smb_direct_free_sendmsg()
484 ib_dma_unmap_page(t->cm_id->device, in smb_direct_free_sendmsg()
488 mempool_free(msg, t->sendmsg_mempool); in smb_direct_free_sendmsg()
538 struct smb_direct_transport *t; in recv_done() local
541 t = recvmsg->transport; in recv_done()
548 smb_direct_disconnect_rdma_connection(t); in recv_done()
550 put_empty_recvmsg(t, recvmsg); in recv_done()
564 put_empty_recvmsg(t, recvmsg); in recv_done()
567 t->negotiation_requested = true; in recv_done()
568 t->full_packet_received = true; in recv_done()
569 t->status = SMB_DIRECT_CS_CONNECTED; in recv_done()
570 enqueue_reassembly(t, recvmsg, 0); in recv_done()
571 wake_up_interruptible(&t->wait_status); in recv_done()
581 put_empty_recvmsg(t, recvmsg); in recv_done()
589 put_empty_recvmsg(t, recvmsg); in recv_done()
593 if (t->full_packet_received) in recv_done()
597 t->full_packet_received = false; in recv_done()
599 t->full_packet_received = true; in recv_done()
601 enqueue_reassembly(t, recvmsg, (int)data_length); in recv_done()
602 wake_up_interruptible(&t->wait_reassembly_queue); in recv_done()
604 spin_lock(&t->receive_credit_lock); in recv_done()
605 receive_credits = --(t->recv_credits); in recv_done()
606 avail_recvmsg_count = t->count_avail_recvmsg; in recv_done()
607 spin_unlock(&t->receive_credit_lock); in recv_done()
609 put_empty_recvmsg(t, recvmsg); in recv_done()
611 spin_lock(&t->receive_credit_lock); in recv_done()
612 receive_credits = --(t->recv_credits); in recv_done()
613 avail_recvmsg_count = ++(t->count_avail_recvmsg); in recv_done()
614 spin_unlock(&t->receive_credit_lock); in recv_done()
617 t->recv_credit_target = in recv_done()
620 &t->send_credits); in recv_done()
624 queue_work(smb_direct_wq, &t->send_immediate_work); in recv_done()
626 if (atomic_read(&t->send_credits) > 0) in recv_done()
627 wake_up_interruptible(&t->wait_send_credits); in recv_done()
631 &t->post_recv_credits_work, 0); in recv_done()
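recv_done() (lines 538-631) ends with credit bookkeeping: the consumed receive credit, the peer's new credit request and grant, waking blocked senders, and scheduling a receive refill. A rough user-space analogue of those decisions; the exact trigger for send_immediate_work and the refill threshold are not visible in the listing and are modelled here as a flag and a simple low-water check:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct xport {
    atomic_int recv_credits;     /* receives still posted locally */
    atomic_int send_credits;     /* sends the peer currently allows us */
    int recv_credit_target;      /* how many receives the peer asked us to keep posted */
};

static void on_receive_done(struct xport *t, int credits_requested,
                            int credits_granted, bool response_requested)
{
    int receive_credits = atomic_fetch_sub(&t->recv_credits, 1) - 1;

    t->recv_credit_target = credits_requested;           /* line 617 */
    atomic_fetch_add(&t->send_credits, credits_granted);  /* line 620 */

    if (response_requested)                   /* assumed condition for line 624 */
        puts("queue send_immediate_work");
    if (atomic_load(&t->send_credits) > 0)    /* lines 626-627 */
        puts("wake wait_send_credits");
    if (receive_credits <= 1)                 /* assumed low-water mark for line 631 */
        puts("schedule post_recv_credits_work");
}

int main(void)
{
    struct xport t = { .recv_credits = 2, .send_credits = 0 };

    on_receive_done(&t, 255, 1, true);
    return 0;
}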
639 static int smb_direct_post_recv(struct smb_direct_transport *t, in smb_direct_post_recv() argument
645 recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, in smb_direct_post_recv()
646 recvmsg->packet, t->max_recv_size, in smb_direct_post_recv()
648 ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); in smb_direct_post_recv()
651 recvmsg->sge.length = t->max_recv_size; in smb_direct_post_recv()
652 recvmsg->sge.lkey = t->pd->local_dma_lkey; in smb_direct_post_recv()
660 ret = ib_post_recv(t->qp, &wr, NULL); in smb_direct_post_recv()
663 ib_dma_unmap_single(t->cm_id->device, in smb_direct_post_recv()
666 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_recv()
672 static int smb_direct_read(struct ksmbd_transport *t, char *buf, in smb_direct_read() argument
680 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_read()
802 struct smb_direct_transport *t = container_of(work, in smb_direct_post_recv_credits() local
809 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
810 receive_credits = t->recv_credits; in smb_direct_post_recv_credits()
811 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
813 if (receive_credits < t->recv_credit_target) { in smb_direct_post_recv_credits()
816 recvmsg = get_free_recvmsg(t); in smb_direct_post_recv_credits()
818 recvmsg = get_empty_recvmsg(t); in smb_direct_post_recv_credits()
831 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_post_recv_credits()
834 put_recvmsg(t, recvmsg); in smb_direct_post_recv_credits()
841 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
842 t->recv_credits += credits; in smb_direct_post_recv_credits()
843 t->count_avail_recvmsg -= credits; in smb_direct_post_recv_credits()
844 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
846 spin_lock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
847 t->new_recv_credits += credits; in smb_direct_post_recv_credits()
848 spin_unlock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
851 queue_work(smb_direct_wq, &t->send_immediate_work); in smb_direct_post_recv_credits()
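smb_direct_post_recv_credits() (lines 802-851) tops the posted receives back up to the peer's target, moving buffers from the free lists into the receive queue and remembering how many fresh credits can be granted on the next send. A simplified sketch of the accounting with the locks and the actual get_*_recvmsg()/smb_direct_post_recv() calls stubbed out:

#include <stdio.h>

struct xport {
    int recv_credits;          /* receives currently posted */
    int recv_credit_target;    /* target the peer asked for */
    int count_avail_recvmsg;   /* buffers available on the free lists */
    int new_recv_credits;      /* credits to advertise on the next send */
};

static void post_recv_credits(struct xport *t)
{
    int credits = 0;
    int avail = t->count_avail_recvmsg;

    /* keep posting receives until we reach the target or run out of buffers;
       in the kernel each iteration is get_*_recvmsg() + smb_direct_post_recv() */
    while (t->recv_credits + credits < t->recv_credit_target && avail > 0) {
        credits++;
        avail--;
    }

    t->recv_credits += credits;          /* lines 842-843 */
    t->count_avail_recvmsg -= credits;
    t->new_recv_credits += credits;      /* line 847, granted to the peer later */

    if (credits)
        puts("queue send_immediate_work");   /* line 851 */
}

int main(void)
{
    struct xport t = { .recv_credits = 3, .recv_credit_target = 10,
                       .count_avail_recvmsg = 5 };

    post_recv_credits(&t);
    printf("recv_credits=%d new_recv_credits=%d avail=%d\n",
           t.recv_credits, t.new_recv_credits, t.count_avail_recvmsg);
    return 0;
}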
857 struct smb_direct_transport *t; in send_done() local
861 t = sendmsg->transport; in send_done()
871 smb_direct_disconnect_rdma_connection(t); in send_done()
874 if (atomic_dec_and_test(&t->send_pending)) in send_done()
875 wake_up(&t->wait_send_pending); in send_done()
883 smb_direct_free_sendmsg(t, sibling); in send_done()
887 smb_direct_free_sendmsg(t, sibling); in send_done()
890 static int manage_credits_prior_sending(struct smb_direct_transport *t) in manage_credits_prior_sending() argument
894 spin_lock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
895 new_credits = t->new_recv_credits; in manage_credits_prior_sending()
896 t->new_recv_credits = 0; in manage_credits_prior_sending()
897 spin_unlock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
902 static int smb_direct_post_send(struct smb_direct_transport *t, in smb_direct_post_send() argument
907 atomic_inc(&t->send_pending); in smb_direct_post_send()
908 ret = ib_post_send(t->qp, wr, NULL); in smb_direct_post_send()
911 if (atomic_dec_and_test(&t->send_pending)) in smb_direct_post_send()
912 wake_up(&t->wait_send_pending); in smb_direct_post_send()
913 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_send()
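smb_direct_post_send() and send_done() (lines 857-913) bracket every work request with the send_pending counter so that free_transport() can wait for in-flight sends to drain (lines 419-420), and a failed post drops its own reference again. A user-space analogue of that lifetime tracking, with a condition variable standing in for wait_send_pending:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct xport {
    atomic_int send_pending;
    pthread_mutex_t mtx;
    pthread_cond_t wait_send_pending;
};

static void send_pending_put(struct xport *t)
{
    if (atomic_fetch_sub(&t->send_pending, 1) == 1) {   /* atomic_dec_and_test() */
        pthread_mutex_lock(&t->mtx);
        pthread_cond_broadcast(&t->wait_send_pending);
        pthread_mutex_unlock(&t->mtx);
    }
}

/* smb_direct_post_send(): on failure the reference taken for the WR is dropped */
static int post_send(struct xport *t, int simulate_error)
{
    atomic_fetch_add(&t->send_pending, 1);
    if (simulate_error) {          /* stands in for ib_post_send() failing */
        send_pending_put(t);
        return -1;
    }
    return 0;
}

/* send_done(): the completion handler drops the reference */
static void send_done(struct xport *t) { send_pending_put(t); }

/* free_transport(): wait until nothing is in flight */
static void wait_send_drained(struct xport *t)
{
    pthread_mutex_lock(&t->mtx);
    while (atomic_load(&t->send_pending) != 0)
        pthread_cond_wait(&t->wait_send_pending, &t->mtx);
    pthread_mutex_unlock(&t->mtx);
}

int main(void)
{
    struct xport t = { .mtx = PTHREAD_MUTEX_INITIALIZER,
                       .wait_send_pending = PTHREAD_COND_INITIALIZER };

    post_send(&t, 0);
    send_done(&t);
    wait_send_drained(&t);
    puts("drained");
    return 0;
}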
918 static void smb_direct_send_ctx_init(struct smb_direct_transport *t, in smb_direct_send_ctx_init() argument
929 static int smb_direct_flush_send_list(struct smb_direct_transport *t, in smb_direct_flush_send_list() argument
953 ret = smb_direct_post_send(t, &first->wr); in smb_direct_flush_send_list()
955 smb_direct_send_ctx_init(t, send_ctx, in smb_direct_flush_send_list()
959 atomic_add(send_ctx->wr_cnt, &t->send_credits); in smb_direct_flush_send_list()
960 wake_up(&t->wait_send_credits); in smb_direct_flush_send_list()
963 smb_direct_free_sendmsg(t, first); in smb_direct_flush_send_list()
969 static int wait_for_credits(struct smb_direct_transport *t, in wait_for_credits() argument
982 t->status != SMB_DIRECT_CS_CONNECTED); in wait_for_credits()
984 if (t->status != SMB_DIRECT_CS_CONNECTED) in wait_for_credits()
991 static int wait_for_send_credits(struct smb_direct_transport *t, in wait_for_send_credits() argument
997 (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { in wait_for_send_credits()
998 ret = smb_direct_flush_send_list(t, send_ctx, false); in wait_for_send_credits()
1003 return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1); in wait_for_send_credits()
1006 static int wait_for_rw_credits(struct smb_direct_transport *t, int credits) in wait_for_rw_credits() argument
1008 return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits); in wait_for_rw_credits()
1011 static int calc_rw_credits(struct smb_direct_transport *t, in calc_rw_credits() argument
1015 t->pages_per_rw_credit); in calc_rw_credits()
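wait_for_send_credits() and wait_for_rw_credits() (lines 991-1008) both funnel into wait_for_credits() (lines 969-984), which blocks until the needed credits can be taken or the transport leaves the CONNECTED state; calc_rw_credits() (lines 1011-1015) turns a buffer length into R/W credits, roughly one per pages_per_rw_credit pages. A user-space sketch of both pieces, with a mutex and condition variable standing in for the kernel's wait queue and atomic counter, and the buffer's sub-page offset ignored in the page count:

#include <pthread.h>
#include <stdio.h>

enum { CS_CONNECTED, CS_DISCONNECTED };

struct credit_pool {
    pthread_mutex_t mtx;
    pthread_cond_t cv;        /* wait_send_credits / wait_rw_credits; completions
                                 that return credits would signal this */
    int credits;              /* send_credits / rw_credits */
    int status;
};

static int wait_for_credits(struct credit_pool *p, int needed)
{
    int ret = 0;

    pthread_mutex_lock(&p->mtx);
    while (p->credits < needed && p->status == CS_CONNECTED)
        pthread_cond_wait(&p->cv, &p->mtx);
    if (p->status != CS_CONNECTED)
        ret = -1;                 /* the kernel fails the wait once disconnected */
    else
        p->credits -= needed;     /* take the credits we waited for */
    pthread_mutex_unlock(&p->mtx);
    return ret;
}

/* calc_rw_credits(): one R/W credit covers pages_per_rw_credit pages */
static int calc_rw_credits(unsigned int buf_len, unsigned int page_size,
                           unsigned int pages_per_rw_credit)
{
    unsigned int pages = (buf_len + page_size - 1) / page_size;

    return (pages + pages_per_rw_credit - 1) / pages_per_rw_credit;
}

int main(void)
{
    struct credit_pool p = { .mtx = PTHREAD_MUTEX_INITIALIZER,
                             .cv = PTHREAD_COND_INITIALIZER,
                             .credits = 4, .status = CS_CONNECTED };

    if (!wait_for_credits(&p, 1))
        printf("took 1 credit, %d left\n", p.credits);
    printf("1MiB at 4KiB pages, 255 pages/credit -> %d credits\n",
           calc_rw_credits(1 << 20, 4096, 255));
    return 0;
}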
1018 static int smb_direct_create_header(struct smb_direct_transport *t, in smb_direct_create_header() argument
1027 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_create_header()
1033 packet->credits_requested = cpu_to_le16(t->send_credit_target); in smb_direct_create_header()
1034 packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_create_header()
1061 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_create_header()
1065 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_create_header()
1067 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_create_header()
1073 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_create_header()
1123 static int post_sendmsg(struct smb_direct_transport *t, in post_sendmsg() argument
1130 ib_dma_sync_single_for_device(t->cm_id->device, in post_sendmsg()
1158 return smb_direct_post_send(t, &msg->wr); in post_sendmsg()
1161 static int smb_direct_post_send_data(struct smb_direct_transport *t, in smb_direct_post_send_data() argument
1171 ret = wait_for_send_credits(t, send_ctx); in smb_direct_post_send_data()
1179 ret = smb_direct_create_header(t, data_length, remaining_data_length, in smb_direct_post_send_data()
1182 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1191 sg_cnt = get_mapped_sg_list(t->cm_id->device, in smb_direct_post_send_data()
1202 ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt, in smb_direct_post_send_data()
1211 sge->lkey = t->pd->local_dma_lkey; in smb_direct_post_send_data()
1216 ret = post_sendmsg(t, send_ctx, msg); in smb_direct_post_send_data()
1221 smb_direct_free_sendmsg(t, msg); in smb_direct_post_send_data()
1222 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
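smb_direct_post_send_data() (lines 1161-1222) does not post each message on its own: work requests accumulate in a send_ctx and are posted together by smb_direct_flush_send_list(), and wait_for_send_credits() forces a flush once the batch reaches 16 WRs or send credits run low (line 997). A toy model of that batching policy, with credit waiting reduced to a failure return:

#include <stdio.h>

struct send_ctx { int wr_cnt; };      /* work requests accumulated so far */
struct xport    { int send_credits; };

static void flush_send_list(struct xport *t, struct send_ctx *ctx)
{
    printf("posting chain of %d WRs, %d credits left\n",
           ctx->wr_cnt, t->send_credits);
    ctx->wr_cnt = 0;                  /* smb_direct_send_ctx_init() re-arms the context */
}

static int queue_send(struct xport *t, struct send_ctx *ctx)
{
    /* wait_for_send_credits(): flush first if the batch is large or credits are scarce */
    if (ctx->wr_cnt >= 16 || t->send_credits <= 1)
        flush_send_list(t, ctx);

    if (t->send_credits <= 0)
        return -1;                    /* the kernel would sleep on wait_send_credits here */
    t->send_credits--;
    ctx->wr_cnt++;
    return 0;
}

int main(void)
{
    struct xport t = { .send_credits = 32 };
    struct send_ctx ctx = { 0 };

    for (int i = 0; i < 20; i++)
        queue_send(&t, &ctx);
    flush_send_list(&t, &ctx);        /* final flush of whatever remains */
    return 0;
}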
1226 static int smb_direct_writev(struct ksmbd_transport *t, in smb_direct_writev() argument
1230 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_writev()
1316 static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t, in smb_direct_free_rdma_rw_msg() argument
1320 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_free_rdma_rw_msg()
1331 struct smb_direct_transport *t = msg->t; in read_write_done() local
1338 smb_direct_disconnect_rdma_connection(t); in read_write_done()
1354 static int smb_direct_rdma_xmit(struct smb_direct_transport *t, in smb_direct_rdma_xmit() argument
1369 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_rdma_xmit()
1372 if (buf_len > t->max_rdma_rw_size) in smb_direct_rdma_xmit()
1392 credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len); in smb_direct_rdma_xmit()
1401 ret = wait_for_rw_credits(t, credits_needed); in smb_direct_rdma_xmit()
1417 msg->t = t; in smb_direct_rdma_xmit()
1439 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1460 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1464 ret = ib_post_send(t->qp, first_wr, NULL); in smb_direct_rdma_xmit()
1476 smb_direct_free_rdma_rw_msg(t, msg, in smb_direct_rdma_xmit()
1479 atomic_add(credits_needed, &t->rw_credits); in smb_direct_rdma_xmit()
1480 wake_up(&t->wait_rw_credits); in smb_direct_rdma_xmit()
1484 static int smb_direct_rdma_write(struct ksmbd_transport *t, in smb_direct_rdma_write() argument
1489 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_write()
1493 static int smb_direct_rdma_read(struct ksmbd_transport *t, in smb_direct_rdma_read() argument
1498 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_read()
1502 static void smb_direct_disconnect(struct ksmbd_transport *t) in smb_direct_disconnect() argument
1504 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_disconnect()
1514 static void smb_direct_shutdown(struct ksmbd_transport *t) in smb_direct_shutdown() argument
1516 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_shutdown()
1526 struct smb_direct_transport *t = cm_id->context; in smb_direct_cm_handler() local
1533 t->status = SMB_DIRECT_CS_CONNECTED; in smb_direct_cm_handler()
1534 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1539 ib_drain_qp(t->qp); in smb_direct_cm_handler()
1541 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1542 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1543 wake_up_interruptible(&t->wait_reassembly_queue); in smb_direct_cm_handler()
1544 wake_up(&t->wait_send_credits); in smb_direct_cm_handler()
1548 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1549 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1563 struct smb_direct_transport *t = context; in smb_direct_qpair_handler() local
1566 t->cm_id, ib_event_msg(event->event), event->event); in smb_direct_qpair_handler()
1571 smb_direct_disconnect_rdma_connection(t); in smb_direct_qpair_handler()
1578 static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, in smb_direct_send_negotiate_response() argument
1585 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_send_negotiate_response()
1602 cpu_to_le16(t->send_credit_target); in smb_direct_send_negotiate_response()
1603 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_send_negotiate_response()
1604 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); in smb_direct_send_negotiate_response()
1605 resp->preferred_send_size = cpu_to_le32(t->max_send_size); in smb_direct_send_negotiate_response()
1606 resp->max_receive_size = cpu_to_le32(t->max_recv_size); in smb_direct_send_negotiate_response()
1608 cpu_to_le32(t->max_fragmented_recv_size); in smb_direct_send_negotiate_response()
1611 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_send_negotiate_response()
1614 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_send_negotiate_response()
1616 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1622 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_send_negotiate_response()
1624 ret = post_sendmsg(t, NULL, sendmsg); in smb_direct_send_negotiate_response()
1626 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1630 wait_event(t->wait_send_pending, in smb_direct_send_negotiate_response()
1631 atomic_read(&t->send_pending) == 0); in smb_direct_send_negotiate_response()
1635 static int smb_direct_accept_client(struct smb_direct_transport *t) in smb_direct_accept_client() argument
1643 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, in smb_direct_accept_client()
1647 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, in smb_direct_accept_client()
1648 t->cm_id->port_num, in smb_direct_accept_client()
1663 ret = rdma_accept(t->cm_id, &conn_param); in smb_direct_accept_client()
1671 static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) in smb_direct_prepare_negotiation() argument
1676 recvmsg = get_free_recvmsg(t); in smb_direct_prepare_negotiation()
1681 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_prepare_negotiation()
1687 t->negotiation_requested = false; in smb_direct_prepare_negotiation()
1688 ret = smb_direct_accept_client(t); in smb_direct_prepare_negotiation()
1694 smb_direct_post_recv_credits(&t->post_recv_credits_work.work); in smb_direct_prepare_negotiation()
1697 put_recvmsg(t, recvmsg); in smb_direct_prepare_negotiation()
1701 static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t) in smb_direct_get_max_fr_pages() argument
1704 t->cm_id->device->attrs.max_fast_reg_page_list_len, in smb_direct_get_max_fr_pages()
1708 static int smb_direct_init_params(struct smb_direct_transport *t, in smb_direct_init_params() argument
1711 struct ib_device *device = t->cm_id->device; in smb_direct_init_params()
1718 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1719 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3; in smb_direct_init_params()
1721 pr_err("max_send_size %d is too large\n", t->max_send_size); in smb_direct_init_params()
1732 t->max_rdma_rw_size = smb_direct_max_read_write_size; in smb_direct_init_params()
1733 t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t); in smb_direct_init_params()
1734 t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size, in smb_direct_init_params()
1735 (t->pages_per_rw_credit - 1) * in smb_direct_init_params()
1743 DIV_ROUND_UP(t->pages_per_rw_credit, in smb_direct_init_params()
1745 max_rw_wrs = t->max_rw_credits * wrs_per_credit; in smb_direct_init_params()
1772 t->recv_credits = 0; in smb_direct_init_params()
1773 t->count_avail_recvmsg = 0; in smb_direct_init_params()
1775 t->recv_credit_max = smb_direct_receive_credit_max; in smb_direct_init_params()
1776 t->recv_credit_target = 10; in smb_direct_init_params()
1777 t->new_recv_credits = 0; in smb_direct_init_params()
1779 t->send_credit_target = smb_direct_send_credit_target; in smb_direct_init_params()
1780 atomic_set(&t->send_credits, 0); in smb_direct_init_params()
1781 atomic_set(&t->rw_credits, t->max_rw_credits); in smb_direct_init_params()
1783 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1784 t->max_recv_size = smb_direct_max_receive_size; in smb_direct_init_params()
1785 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; in smb_direct_init_params()
1788 cap->max_recv_wr = t->recv_credit_max; in smb_direct_init_params()
1792 cap->max_rdma_ctxs = t->max_rw_credits; in smb_direct_init_params()
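smb_direct_init_params() (lines 1708-1792) sizes the queue pair from the module parameters and device attributes. A worked example of the two formulas visible in the listing, with assumed input values (not the module defaults) and the truncated multiplicand at line 1735 completed with the page size; the -1 in the divisor is read here as headroom for a transfer that does not start on a page boundary:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    /* assumed example values; the real ones come from module parameters
       and device attributes, not from this sketch */
    unsigned int page_size           = 4096;
    unsigned int max_send_size       = 8192;      /* smb_direct_max_send_size */
    unsigned int max_rdma_rw_size    = 1 << 20;   /* smb_direct_max_read_write_size */
    unsigned int pages_per_rw_credit = 256;       /* smb_direct_get_max_fr_pages() */

    /* line 1719: SGEs needed to cover one send, plus headroom */
    unsigned int max_send_sges = DIV_ROUND_UP(max_send_size, page_size) + 3;

    /* lines 1734-1735, divisor completed with the page size */
    unsigned int max_rw_credits =
        DIV_ROUND_UP(max_rdma_rw_size, (pages_per_rw_credit - 1) * page_size);

    printf("max_send_sges=%u max_rw_credits=%u\n", max_send_sges, max_rw_credits);
    return 0;
}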
1796 static void smb_direct_destroy_pools(struct smb_direct_transport *t) in smb_direct_destroy_pools() argument
1800 while ((recvmsg = get_free_recvmsg(t))) in smb_direct_destroy_pools()
1801 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1802 while ((recvmsg = get_empty_recvmsg(t))) in smb_direct_destroy_pools()
1803 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1805 mempool_destroy(t->recvmsg_mempool); in smb_direct_destroy_pools()
1806 t->recvmsg_mempool = NULL; in smb_direct_destroy_pools()
1808 kmem_cache_destroy(t->recvmsg_cache); in smb_direct_destroy_pools()
1809 t->recvmsg_cache = NULL; in smb_direct_destroy_pools()
1811 mempool_destroy(t->sendmsg_mempool); in smb_direct_destroy_pools()
1812 t->sendmsg_mempool = NULL; in smb_direct_destroy_pools()
1814 kmem_cache_destroy(t->sendmsg_cache); in smb_direct_destroy_pools()
1815 t->sendmsg_cache = NULL; in smb_direct_destroy_pools()
1818 static int smb_direct_create_pools(struct smb_direct_transport *t) in smb_direct_create_pools() argument
1824 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); in smb_direct_create_pools()
1825 t->sendmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1829 if (!t->sendmsg_cache) in smb_direct_create_pools()
1832 t->sendmsg_mempool = mempool_create(t->send_credit_target, in smb_direct_create_pools()
1834 t->sendmsg_cache); in smb_direct_create_pools()
1835 if (!t->sendmsg_mempool) in smb_direct_create_pools()
1838 snprintf(name, sizeof(name), "smb_direct_resp_%p", t); in smb_direct_create_pools()
1839 t->recvmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1841 t->max_recv_size, in smb_direct_create_pools()
1843 if (!t->recvmsg_cache) in smb_direct_create_pools()
1846 t->recvmsg_mempool = in smb_direct_create_pools()
1847 mempool_create(t->recv_credit_max, mempool_alloc_slab, in smb_direct_create_pools()
1848 mempool_free_slab, t->recvmsg_cache); in smb_direct_create_pools()
1849 if (!t->recvmsg_mempool) in smb_direct_create_pools()
1852 INIT_LIST_HEAD(&t->recvmsg_queue); in smb_direct_create_pools()
1854 for (i = 0; i < t->recv_credit_max; i++) { in smb_direct_create_pools()
1855 recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL); in smb_direct_create_pools()
1858 recvmsg->transport = t; in smb_direct_create_pools()
1859 list_add(&recvmsg->list, &t->recvmsg_queue); in smb_direct_create_pools()
1861 t->count_avail_recvmsg = t->recv_credit_max; in smb_direct_create_pools()
1865 smb_direct_destroy_pools(t); in smb_direct_create_pools()
1869 static int smb_direct_create_qpair(struct smb_direct_transport *t, in smb_direct_create_qpair() argument
1876 t->pd = ib_alloc_pd(t->cm_id->device, 0); in smb_direct_create_qpair()
1877 if (IS_ERR(t->pd)) { in smb_direct_create_qpair()
1879 ret = PTR_ERR(t->pd); in smb_direct_create_qpair()
1880 t->pd = NULL; in smb_direct_create_qpair()
1884 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1887 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1889 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1890 t->send_cq = NULL; in smb_direct_create_qpair()
1894 t->recv_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1895 t->recv_credit_max, 0, IB_POLL_WORKQUEUE); in smb_direct_create_qpair()
1896 if (IS_ERR(t->recv_cq)) { in smb_direct_create_qpair()
1898 ret = PTR_ERR(t->recv_cq); in smb_direct_create_qpair()
1899 t->recv_cq = NULL; in smb_direct_create_qpair()
1905 qp_attr.qp_context = t; in smb_direct_create_qpair()
1909 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1910 qp_attr.recv_cq = t->recv_cq; in smb_direct_create_qpair()
1913 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); in smb_direct_create_qpair()
1919 t->qp = t->cm_id->qp; in smb_direct_create_qpair()
1920 t->cm_id->event_handler = smb_direct_cm_handler; in smb_direct_create_qpair()
1922 pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; in smb_direct_create_qpair()
1923 if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) { in smb_direct_create_qpair()
1924 ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, in smb_direct_create_qpair()
1925 t->max_rw_credits, IB_MR_TYPE_MEM_REG, in smb_direct_create_qpair()
1926 t->pages_per_rw_credit, 0); in smb_direct_create_qpair()
1929 t->max_rw_credits, t->pages_per_rw_credit); in smb_direct_create_qpair()
1936 if (t->qp) { in smb_direct_create_qpair()
1937 ib_destroy_qp(t->qp); in smb_direct_create_qpair()
1938 t->qp = NULL; in smb_direct_create_qpair()
1940 if (t->recv_cq) { in smb_direct_create_qpair()
1941 ib_destroy_cq(t->recv_cq); in smb_direct_create_qpair()
1942 t->recv_cq = NULL; in smb_direct_create_qpair()
1944 if (t->send_cq) { in smb_direct_create_qpair()
1945 ib_destroy_cq(t->send_cq); in smb_direct_create_qpair()
1946 t->send_cq = NULL; in smb_direct_create_qpair()
1948 if (t->pd) { in smb_direct_create_qpair()
1949 ib_dealloc_pd(t->pd); in smb_direct_create_qpair()
1950 t->pd = NULL; in smb_direct_create_qpair()
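smb_direct_create_qpair() (lines 1869-1950) allocates the PD, both CQs and the QP in order and, on failure, tears down whatever was already created in reverse, NULLing each pointer so that free_transport() (lines 446-453) does not release it a second time. A compact user-space sketch of that goto-ladder shape; the resource names mirror the kernel ones but the create/destroy functions are stubs:

#include <stdio.h>
#include <stdlib.h>

struct res { const char *name; };     /* stub for PD, CQ, QP */

static struct res *create(const char *name, int fail)
{
    struct res *r;

    if (fail)
        return NULL;
    r = malloc(sizeof(*r));
    if (!r)
        return NULL;
    r->name = name;
    return r;
}

static void destroy(struct res **r)
{
    if (*r) {
        printf("destroy %s\n", (*r)->name);
        free(*r);
        *r = NULL;                    /* so a later teardown pass skips it */
    }
}

struct xport { struct res *pd, *send_cq, *recv_cq, *qp; };

static int create_qpair(struct xport *t, int fail_at_qp)
{
    t->pd = create("pd", 0);
    if (!t->pd)
        goto err;
    t->send_cq = create("send_cq", 0);
    if (!t->send_cq)
        goto err;
    t->recv_cq = create("recv_cq", 0);
    if (!t->recv_cq)
        goto err;
    t->qp = create("qp", fail_at_qp);
    if (!t->qp)
        goto err;
    return 0;
err:
    /* unwind in reverse creation order, exactly once each */
    destroy(&t->qp);
    destroy(&t->recv_cq);
    destroy(&t->send_cq);
    destroy(&t->pd);
    return -1;
}

int main(void)
{
    struct xport t = { 0 };

    printf("create_qpair -> %d\n", create_qpair(&t, 1));
    return 0;
}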
1955 static int smb_direct_prepare(struct ksmbd_transport *t) in smb_direct_prepare() argument
1957 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_prepare()
2041 struct smb_direct_transport *t; in smb_direct_handle_connect_request() local
2052 t = alloc_transport(new_cm_id); in smb_direct_handle_connect_request()
2053 if (!t) in smb_direct_handle_connect_request()
2056 ret = smb_direct_connect(t); in smb_direct_handle_connect_request()
2061 KSMBD_TRANS(t)->conn, "ksmbd:r%u", in smb_direct_handle_connect_request()
2071 free_transport(t); in smb_direct_handle_connect_request()