/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"

#define SMC_CDC_MSG_TYPE		0xFE

/* in network byte order */
union smc_cdc_cursor {		/* SMC cursor */
	struct {
		__be16	reserved;
		__be16	wrap;
		__be32	count;
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;		/* for atomic processing */
#else
	u64		acurs;		/* for atomic processing */
#endif
} __aligned(8);

/* in network byte order */
struct smc_cdc_msg {
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* 44 */
	__be16				seqno;
	__be32				token;
	union smc_cdc_cursor		prod;
	union smc_cdc_cursor		cons;	/* piggy backed "ack" */
	struct smc_cdc_producer_flags	prod_flags;
	struct smc_cdc_conn_state_flags	conn_state_flags;
	u8				reserved[18];
} __aligned(8);

static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
	return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
}

static inline bool smc_cdc_rxed_any_close_or_senddone(
	struct smc_connection *conn)
{
	return smc_cdc_rxed_any_close(conn) ||
	       conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
}

static inline void smc_curs_add(int size, union smc_host_cursor *curs,
				int value)
{
	curs->count += value;
	if (curs->count >= size) {
		curs->wrap++;
		curs->count -= size;
	}
}

/* SMC cursors are 8 bytes long and require atomic reading and writing */
static inline u64 smc_curs_read(union smc_host_cursor *curs,
				struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	ret = curs->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
	return ret;
#else
	return atomic64_read(&curs->acurs);
#endif
}

static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs,
				    struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	ret = curs->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
	return ret;
#else
	return atomic64_read(&curs->acurs);
#endif
}

static inline void smc_curs_write(union smc_host_cursor *curs, u64 val,
				  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	curs->acurs = val;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&curs->acurs, val);
#endif
}

static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val,
				      struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	curs->acurs = val;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&curs->acurs, val);
#endif
}
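/*
 * Illustrative sketch (not part of the original header): the read/write
 * helpers above are typically paired to take an atomic 8-byte snapshot of
 * one host cursor and store it into another, as the conversion helpers
 * below do.  The helper name here is hypothetical and only shows the
 * intended usage pattern.
 */
static inline void smc_curs_copy_sketch(union smc_host_cursor *tgt,
					union smc_host_cursor *src,
					struct smc_connection *conn)
{
	/* read src atomically, then write the snapshot atomically into tgt */
	smc_curs_write(tgt, smc_curs_read(src, conn), conn);
}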
/* calculate cursor difference between old and new, where old <= new */
static inline int smc_curs_diff(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	if (old->wrap != new->wrap)
		return max_t(int, 0,
			     ((size - old->count) + new->count));

	return max_t(int, 0, (new->count - old->count));
}

static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
					  union smc_host_cursor *local,
					  struct smc_connection *conn)
{
	union smc_host_cursor temp;

	smc_curs_write(&temp, smc_curs_read(local, conn), conn);
	peer->count = htonl(temp.count);
	peer->wrap = htons(temp.wrap);
	/* peer->reserved = htons(0); must be ensured by caller */
}

static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
				       struct smc_host_cdc_msg *local,
				       struct smc_connection *conn)
{
	peer->common.type = local->common.type;
	peer->len = local->len;
	peer->seqno = htons(local->seqno);
	peer->token = htonl(local->token);
	smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn);
	smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn);
	peer->prod_flags = local->prod_flags;
	peer->conn_state_flags = local->conn_state_flags;
}

static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
					  union smc_cdc_cursor *peer,
					  struct smc_connection *conn)
{
	union smc_host_cursor temp, old;
	union smc_cdc_cursor net;

	smc_curs_write(&old, smc_curs_read(local, conn), conn);
	smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn);
	temp.count = ntohl(net.count);
	temp.wrap = ntohs(net.wrap);
	if ((old.wrap > temp.wrap) && temp.wrap)
		return;
	if ((old.wrap == temp.wrap) &&
	    (old.count > temp.count))
		return;
	smc_curs_write(local, smc_curs_read(&temp, conn), conn);
}

static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
				       struct smc_cdc_msg *peer,
				       struct smc_connection *conn)
{
	local->common.type = peer->common.type;
	local->len = peer->len;
	local->seqno = ntohs(peer->seqno);
	local->token = ntohl(peer->token);
	smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
	smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
	local->prod_flags = peer->prod_flags;
	local->conn_state_flags = peer->conn_state_flags;
}

struct smc_cdc_tx_pend;

int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_cdc_tx_pend **pend);
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
int smc_cdc_init(void) __init;

#endif /* SMC_CDC_H */
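/*
 * Worked example of the cursor arithmetic (illustrative, not part of the
 * original header): with a buffer of size 16, advancing a cursor at
 * {wrap = 3, count = 14} by 5 bytes via smc_curs_add() wraps it to
 * {wrap = 4, count = 3}; smc_curs_diff() between the old and new snapshots
 * then returns (16 - 14) + 3 = 5 bytes.
 */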