/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#ifndef RTRS_SRV_H
#define RTRS_SRV_H

#include <linux/device.h>
#include <linux/refcount.h>
#include <linux/percpu.h>
#include "rtrs-pri.h"

/*
 * enum rtrs_srv_state - Server states.
 */
enum rtrs_srv_state {
	RTRS_SRV_CONNECTING,
	RTRS_SRV_CONNECTED,
	RTRS_SRV_CLOSING,
	RTRS_SRV_CLOSED,
};

/*
 * Stats for read and write operations;
 * see Documentation/ABI/testing/sysfs-class-rtrs-server for details.
 */
struct rtrs_srv_stats_rdma_stats {
	struct {
		u64 cnt;
		u64 size_total;
	} dir[2];
};

struct rtrs_srv_stats {
	struct kobject kobj_stats;
	struct rtrs_srv_stats_rdma_stats __percpu *rdma_stats;
	struct rtrs_srv_path *srv_path;
};

struct rtrs_srv_con {
	struct rtrs_con c;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;
};

/* IO context in rtrs_srv; each IO has one */
struct rtrs_srv_op {
	struct rtrs_srv_con *con;
	u32 msg_id;
	u8 dir;
	struct rtrs_msg_rdma_read *rd_msg;
	struct ib_rdma_wr tx_wr;
	struct ib_sge tx_sg;
	struct list_head wait_list;
	int status;
};

/*
 * Server-side memory region context: when always_invalidate=Y, we need
 * queue_depth memory regions so that each one can be invalidated.
 */
struct rtrs_srv_mr {
	struct ib_mr *mr;
	struct sg_table sgt;
	struct ib_cqe inv_cqe;	/* only for always_invalidate=true */
	u32 msg_id;		/* only for always_invalidate=true */
	u32 msg_off;		/* only for always_invalidate=true */
	struct rtrs_iu *iu;	/* send buffer for new rkey msg */
};

struct rtrs_srv_path {
	struct rtrs_path s;
	struct rtrs_srv_sess *srv;
	struct work_struct close_work;
	enum rtrs_srv_state state;
	spinlock_t state_lock;
	int cur_cq_vector;
	struct rtrs_srv_op **ops_ids;
	struct percpu_ref ids_inflight_ref;
	struct completion complete_done;
	struct rtrs_srv_mr *mrs;
	unsigned int mrs_num;
	dma_addr_t *dma_addr;
	bool established;
	unsigned int mem_bits;
	struct kobject kobj;
	struct rtrs_srv_stats *stats;
};

struct rtrs_srv_sess {
	struct list_head paths_list;
	int paths_up;
	struct mutex paths_ev_mutex;
	size_t paths_num;
	struct mutex paths_mutex;
	uuid_t paths_uuid;
	refcount_t refcount;
	struct rtrs_srv_ctx *ctx;
	struct list_head ctx_list;
	void *priv;
	size_t queue_depth;
	struct page **chunks;
	struct device dev;
	unsigned int dev_ref;
	struct kobject *kobj_paths;
};

struct rtrs_srv_ctx {
	struct rtrs_srv_ops ops;
	struct rdma_cm_id *cm_id_ip;
	struct rdma_cm_id *cm_id_ib;
	struct mutex srv_mutex;
	struct list_head srv_list;
};

struct rtrs_srv_ib_ctx {
	struct rtrs_srv_ctx *srv_ctx;
	u16 port;
	struct mutex ib_dev_mutex;
	int ib_dev_count;
};

extern struct class *rtrs_dev_class;

void close_path(struct rtrs_srv_path *srv_path);

static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
					      size_t size, int d)
{
	this_cpu_inc(s->rdma_stats->dir[d].cnt);
	this_cpu_add(s->rdma_stats->dir[d].size_total, size);
}
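
/*
 * The rdma_stats counters above are per-CPU, so any reader has to sum
 * them across all possible CPUs. Below is a minimal sketch of that
 * aggregation, kept out of the build and illustrative only: the helper
 * name is hypothetical, while the in-tree aggregation lives in
 * rtrs_srv_stats_rdma_to_str() in rtrs-srv-stats.c.
 */
#if 0	/* example sketch, not compiled */
static u64 example_sum_rdma_cnt(struct rtrs_srv_stats *s, int d)
{
	u64 sum = 0;
	int cpu;

	/* Walk every possible CPU's private counter and accumulate. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(s->rdma_stats, cpu)->dir[d].cnt;

	return sum;
}
#endif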
/* functions which are implemented in rtrs-srv-stats.c */
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
				char *page, size_t len);

/* functions which are implemented in rtrs-srv-sysfs.c */
int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);

#endif /* RTRS_SRV_H */
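
/*
 * Usage sketch: a path normally moves RTRS_SRV_CONNECTING ->
 * RTRS_SRV_CONNECTED -> RTRS_SRV_CLOSING -> RTRS_SRV_CLOSED, and every
 * transition is made under state_lock. The helper below is a
 * hypothetical, simplified illustration of that locking pattern (the
 * real transition logic lives in rtrs-srv.c); it is kept out of the
 * build.
 */
#if 0	/* example sketch, not compiled */
static bool example_change_state(struct rtrs_srv_path *srv_path,
				 enum rtrs_srv_state new_state)
{
	bool changed = false;

	spin_lock_irq(&srv_path->state_lock);
	/* Simplification: permit only forward transitions. */
	if (srv_path->state < new_state) {
		srv_path->state = new_state;
		changed = true;
	}
	spin_unlock_irq(&srv_path->state_lock);

	return changed;
}
#endif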