Lines matching refs: srv

105 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_free_ops_ids() local
109 for (i = 0; i < srv->queue_depth; i++) in rtrs_srv_free_ops_ids()
134 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_alloc_ops_ids() local
138 srv_path->ops_ids = kcalloc(srv->queue_depth, in rtrs_srv_alloc_ops_ids()
144 for (i = 0; i < srv->queue_depth; ++i) { in rtrs_srv_alloc_ops_ids()
540 void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv) in rtrs_srv_set_sess_priv() argument
542 srv->priv = priv; in rtrs_srv_set_sess_priv()
568 struct rtrs_srv_sess *srv = srv_path->srv; in map_cont_bufs() local
585 mrs_num = srv->queue_depth; in map_cont_bufs()
589 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); in map_cont_bufs()
590 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); in map_cont_bufs()
607 srv->queue_depth - chunks); in map_cont_bufs()
614 sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
655 chunk_bits = ilog2(srv->queue_depth - 1) + 1; in map_cont_bufs()
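
The two DIV_ROUND_UP() calls at 589-590 first compute how many MRs are needed when one MR can only cover a limited number of chunks, then re-divide so the chunks end up spread evenly across those MRs; the last MR simply takes whatever remains (line 607). A minimal sketch of just that arithmetic, with illustrative names (max_sge_per_mr is an assumption, not the driver's variable):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static void split_queue_into_mrs(unsigned int queue_depth,
				 unsigned int max_sge_per_mr,
				 unsigned int *mrs_num,
				 unsigned int *chunks_per_mr)
{
	/* MRs needed when each MR maps at most max_sge_per_mr chunk pages. */
	*mrs_num = DIV_ROUND_UP(queue_depth, max_sge_per_mr);
	/* Re-balance: spread queue_depth chunks evenly over those MRs. */
	*chunks_per_mr = DIV_ROUND_UP(queue_depth, *mrs_num);
}

For example, a queue_depth of 512 with 30 SG entries per MR yields 18 MRs of at most 29 chunks, the last one mapping the remaining 19. Line 655's chunk_bits = ilog2(queue_depth - 1) + 1 is then the bit width needed to address a chunk (9 bits for 512 chunks).
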
718 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_up() local
719 struct rtrs_srv_ctx *ctx = srv->ctx; in rtrs_srv_path_up()
722 mutex_lock(&srv->paths_ev_mutex); in rtrs_srv_path_up()
723 up = ++srv->paths_up; in rtrs_srv_path_up()
725 ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL); in rtrs_srv_path_up()
726 mutex_unlock(&srv->paths_ev_mutex); in rtrs_srv_path_up()
737 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_down() local
738 struct rtrs_srv_ctx *ctx = srv->ctx; in rtrs_srv_path_down()
744 mutex_lock(&srv->paths_ev_mutex); in rtrs_srv_path_down()
745 WARN_ON(!srv->paths_up); in rtrs_srv_path_down()
746 if (--srv->paths_up == 0) in rtrs_srv_path_down()
747 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv); in rtrs_srv_path_down()
748 mutex_unlock(&srv->paths_ev_mutex); in rtrs_srv_path_down()
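
rtrs_srv_path_up() and rtrs_srv_path_down() keep a per-session count of established paths under paths_ev_mutex so that only edge transitions reach the user: the listing shows DISCONNECTED fired when the counter drops to zero (line 746), and CONNECTED is presumably raised only for the first path. A condensed sketch of that pattern, reusing the driver's types (the up == 1 test is an assumption, since line 724 is not shown above; the rest mirrors the lines listed):

static int path_up(struct rtrs_srv_sess *srv)
{
	struct rtrs_srv_ctx *ctx = srv->ctx;
	int up, ret = 0;

	mutex_lock(&srv->paths_ev_mutex);
	up = ++srv->paths_up;
	if (up == 1)	/* assumed: only the first path raises CONNECTED */
		ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
	mutex_unlock(&srv->paths_ev_mutex);

	return ret;
}

static void path_down(struct rtrs_srv_sess *srv)
{
	struct rtrs_srv_ctx *ctx = srv->ctx;

	mutex_lock(&srv->paths_ev_mutex);
	WARN_ON(!srv->paths_up);
	if (--srv->paths_up == 0)	/* last path gone: raise DISCONNECTED */
		ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
	mutex_unlock(&srv->paths_ev_mutex);
}
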
754 struct rtrs_srv_sess *srv; in exist_pathname() local
759 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { in exist_pathname()
760 mutex_lock(&srv->paths_mutex); in exist_pathname()
763 if (uuid_equal(&srv->paths_uuid, path_uuid)) { in exist_pathname()
764 mutex_unlock(&srv->paths_mutex); in exist_pathname()
768 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in exist_pathname()
775 mutex_unlock(&srv->paths_mutex); in exist_pathname()
809 if (exist_pathname(srv_path->srv->ctx, in process_info_req()
810 msg->pathname, &srv_path->srv->paths_uuid)) { in process_info_req()
860 get_device(&srv_path->srv->dev); in process_info_req()
980 struct rtrs_srv_sess *srv = srv_path->srv; in post_recv_path() local
989 q_size = srv->queue_depth; in post_recv_path()
1007 struct rtrs_srv_sess *srv = srv_path->srv; in process_read() local
1008 struct rtrs_srv_ctx *ctx = srv->ctx; in process_read()
1035 data = page_address(srv->chunks[buf_id]); in process_read()
1036 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, in process_read()
1065 struct rtrs_srv_sess *srv = srv_path->srv; in process_write() local
1066 struct rtrs_srv_ctx *ctx = srv->ctx; in process_write()
1088 data = page_address(srv->chunks[buf_id]); in process_write()
1089 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, in process_write()
1152 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_inv_rkey_done() local
1163 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
1209 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_rdma_done() local
1246 if (msg_id >= srv->queue_depth || off >= max_chunk_size) { in rtrs_srv_rdma_done()
1265 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
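
Both the invalidation completion (line 1163) and the RDMA completion handler (lines 1246, 1265) resolve a request's payload the same way: validate the chunk id and offset carried in the immediate data, then take page_address() of the corresponding chunk page. A small sketch of that lookup, folding in the bounds check from line 1246 (the helper name is illustrative):

#include <linux/mm.h>	/* page_address() */

static void *chunk_payload(struct rtrs_srv_sess *srv, u32 msg_id, u32 off)
{
	/* Reject malformed ids/offsets before dereferencing the chunk array. */
	if (msg_id >= srv->queue_depth || off >= max_chunk_size)
		return NULL;

	return page_address(srv->chunks[msg_id]) + off;
}
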
1302 int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, in rtrs_srv_get_path_name() argument
1308 mutex_lock(&srv->paths_mutex); in rtrs_srv_get_path_name()
1309 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in rtrs_srv_get_path_name()
1317 mutex_unlock(&srv->paths_mutex); in rtrs_srv_get_path_name()
1327 int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) in rtrs_srv_get_queue_depth() argument
1329 return srv->queue_depth; in rtrs_srv_get_queue_depth()
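
rtrs_srv_get_path_name() and rtrs_srv_get_queue_depth(), together with rtrs_srv_set_sess_priv() at line 540, form the small query/priv API a ULP touches when a session comes up. A hedged usage sketch from the ULP side (modelled loosely on how rnbd-srv consumes it; my_sess, the buffer size and the length parameter of rtrs_srv_get_path_name() are assumptions here, since that argument list is truncated above):

#include <linux/limits.h>	/* NAME_MAX */
#include <linux/slab.h>

struct my_sess {
	struct rtrs_srv_sess	*rtrs;
	char			pathname[NAME_MAX];
	int			queue_depth;
};

static int my_link_connected(struct rtrs_srv_sess *rtrs)
{
	struct my_sess *sess;
	int err;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return -ENOMEM;

	sess->rtrs = rtrs;
	sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
	err = rtrs_srv_get_path_name(rtrs, sess->pathname,
				     sizeof(sess->pathname));
	if (err) {
		kfree(sess);
		return err;
	}
	/* Hand the pointer back so later rdma_ev() callbacks receive it as priv. */
	rtrs_srv_set_sess_priv(rtrs, sess);

	return 0;
}
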
1353 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, in rtrs_srv_dev_release() local
1356 kfree(srv); in rtrs_srv_dev_release()
1359 static void free_srv(struct rtrs_srv_sess *srv) in free_srv() argument
1363 WARN_ON(refcount_read(&srv->refcount)); in free_srv()
1364 for (i = 0; i < srv->queue_depth; i++) in free_srv()
1365 __free_pages(srv->chunks[i], get_order(max_chunk_size)); in free_srv()
1366 kfree(srv->chunks); in free_srv()
1367 mutex_destroy(&srv->paths_mutex); in free_srv()
1368 mutex_destroy(&srv->paths_ev_mutex); in free_srv()
1370 put_device(&srv->dev); in free_srv()
1377 struct rtrs_srv_sess *srv; in get_or_create_srv() local
1381 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { in get_or_create_srv()
1382 if (uuid_equal(&srv->paths_uuid, paths_uuid) && in get_or_create_srv()
1383 refcount_inc_not_zero(&srv->refcount)) { in get_or_create_srv()
1385 return srv; in get_or_create_srv()
1399 srv = kzalloc(sizeof(*srv), GFP_KERNEL); in get_or_create_srv()
1400 if (!srv) in get_or_create_srv()
1403 INIT_LIST_HEAD(&srv->paths_list); in get_or_create_srv()
1404 mutex_init(&srv->paths_mutex); in get_or_create_srv()
1405 mutex_init(&srv->paths_ev_mutex); in get_or_create_srv()
1406 uuid_copy(&srv->paths_uuid, paths_uuid); in get_or_create_srv()
1407 srv->queue_depth = sess_queue_depth; in get_or_create_srv()
1408 srv->ctx = ctx; in get_or_create_srv()
1409 device_initialize(&srv->dev); in get_or_create_srv()
1410 srv->dev.release = rtrs_srv_dev_release; in get_or_create_srv()
1412 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), in get_or_create_srv()
1414 if (!srv->chunks) in get_or_create_srv()
1417 for (i = 0; i < srv->queue_depth; i++) { in get_or_create_srv()
1418 srv->chunks[i] = alloc_pages(GFP_KERNEL, in get_or_create_srv()
1420 if (!srv->chunks[i]) in get_or_create_srv()
1423 refcount_set(&srv->refcount, 1); in get_or_create_srv()
1425 list_add(&srv->ctx_list, &ctx->srv_list); in get_or_create_srv()
1428 return srv; in get_or_create_srv()
1432 __free_pages(srv->chunks[i], get_order(max_chunk_size)); in get_or_create_srv()
1433 kfree(srv->chunks); in get_or_create_srv()
1436 kfree(srv); in get_or_create_srv()
1440 static void put_srv(struct rtrs_srv_sess *srv) in put_srv() argument
1442 if (refcount_dec_and_test(&srv->refcount)) { in put_srv()
1443 struct rtrs_srv_ctx *ctx = srv->ctx; in put_srv()
1445 WARN_ON(srv->dev.kobj.state_in_sysfs); in put_srv()
1448 list_del(&srv->ctx_list); in put_srv()
1450 free_srv(srv); in put_srv()
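
Lines 1377-1450 cover the session object's own life cycle: lookup by paths_uuid with refcount_inc_not_zero() so only a still-live session is reused, allocation of queue_depth chunk pages on first use, and teardown once the last reference is dropped. A condensed sketch of that get-or-create/put pairing, reusing the driver's types; locking of ctx->srv_list and the chunk allocation/unwinding are left out and assumed to be handled by the elided lines:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/uuid.h>

static struct rtrs_srv_sess *get_or_create(struct rtrs_srv_ctx *ctx,
					   const uuid_t *paths_uuid)
{
	struct rtrs_srv_sess *srv;

	list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
		/* Reuse a live session only; a zero refcount means it is dying. */
		if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
		    refcount_inc_not_zero(&srv->refcount))
			return srv;
	}

	srv = kzalloc(sizeof(*srv), GFP_KERNEL);
	if (!srv)
		return ERR_PTR(-ENOMEM);
	uuid_copy(&srv->paths_uuid, paths_uuid);
	srv->ctx = ctx;
	refcount_set(&srv->refcount, 1);
	list_add(&srv->ctx_list, &ctx->srv_list);

	return srv;
}

static void put_session(struct rtrs_srv_sess *srv)
{
	if (refcount_dec_and_test(&srv->refcount)) {
		list_del(&srv->ctx_list);	/* drop from ctx->srv_list (line 1448) */
		free_srv(srv);			/* frees the chunks, then put_device() */
	}
}
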
1454 static void __add_path_to_srv(struct rtrs_srv_sess *srv, in __add_path_to_srv() argument
1457 list_add_tail(&srv_path->s.entry, &srv->paths_list); in __add_path_to_srv()
1458 srv->paths_num++; in __add_path_to_srv()
1459 WARN_ON(srv->paths_num >= MAX_PATHS_NUM); in __add_path_to_srv()
1464 struct rtrs_srv_sess *srv = srv_path->srv; in del_path_from_srv() local
1466 if (WARN_ON(!srv)) in del_path_from_srv()
1469 mutex_lock(&srv->paths_mutex); in del_path_from_srv()
1471 WARN_ON(!srv->paths_num); in del_path_from_srv()
1472 srv->paths_num--; in del_path_from_srv()
1473 mutex_unlock(&srv->paths_mutex); in del_path_from_srv()
1500 static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv, in __is_path_w_addr_exists() argument
1505 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in __is_path_w_addr_exists()
1573 put_srv(srv_path->srv); in rtrs_srv_close_work()
1574 srv_path->srv = NULL; in rtrs_srv_close_work()
1585 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_rdma_do_accept() local
1599 .queue_depth = cpu_to_le16(srv->queue_depth), in rtrs_rdma_do_accept()
1634 __find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid) in __find_path() argument
1638 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in __find_path()
1650 struct rtrs_srv_sess *srv = srv_path->srv; in create_con() local
1679 s->signal_interval = min_not_zero(srv->queue_depth, in create_con()
1686 srv->queue_depth * (1 + 4) + 1); in create_con()
1690 srv->queue_depth * (1 + 2) + 1); in create_con()
1692 max_recv_wr = srv->queue_depth + 1; in create_con()
1731 static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, in __alloc_path() argument
1742 if (srv->paths_num >= MAX_PATHS_NUM) { in __alloc_path()
1746 if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) { in __alloc_path()
1765 srv_path->dma_addr = kcalloc(srv->queue_depth, in __alloc_path()
1777 srv_path->srv = srv; in __alloc_path()
1809 __add_path_to_srv(srv, srv_path); in __alloc_path()
1837 struct rtrs_srv_sess *srv; in rtrs_rdma_connect() local
1870 srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn); in rtrs_rdma_connect()
1871 if (IS_ERR(srv)) { in rtrs_rdma_connect()
1872 err = PTR_ERR(srv); in rtrs_rdma_connect()
1876 mutex_lock(&srv->paths_mutex); in rtrs_rdma_connect()
1877 srv_path = __find_path(srv, &msg->sess_uuid); in rtrs_rdma_connect()
1882 put_srv(srv); in rtrs_rdma_connect()
1887 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1896 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1902 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1906 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, in rtrs_rdma_connect()
1909 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1910 put_srv(srv); in rtrs_rdma_connect()
1940 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1948 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
2195 static void close_paths(struct rtrs_srv_sess *srv) in close_paths() argument
2199 mutex_lock(&srv->paths_mutex); in close_paths()
2200 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in close_paths()
2202 mutex_unlock(&srv->paths_mutex); in close_paths()
2207 struct rtrs_srv_sess *srv; in close_ctx() local
2210 list_for_each_entry(srv, &ctx->srv_list, ctx_list) in close_ctx()
2211 close_paths(srv); in close_ctx()