/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for SMC Connections, Link Groups and Links
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef _SMC_CORE_H
#define _SMC_CORE_H

#include <linux/atomic.h>
#include <linux/smc.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>
#include <net/genetlink.h>

#include "smc.h"
#include "smc_ib.h"

#define SMC_RMBS_PER_LGR_MAX	255	/* max. # of RMBs per link group */

struct smc_lgr_list {			/* list of link group definition */
	struct list_head	list;
	spinlock_t		lock;	/* protects list of link groups */
	u32			num;	/* unique link group number */
};

enum smc_lgr_role {		/* possible roles of a link group */
	SMC_CLNT,	/* client */
	SMC_SERV	/* server */
};

enum smc_link_state {			/* possible states of a link */
	SMC_LNK_UNUSED,		/* link is unused */
	SMC_LNK_INACTIVE,	/* link is inactive */
	SMC_LNK_ACTIVATING,	/* link is being activated */
	SMC_LNK_ACTIVE,		/* link is active */
};

#define SMC_WR_BUF_SIZE		48	/* size of work request buffer */
#define SMC_WR_BUF_V2_SIZE	8192	/* size of v2 work request buffer */

struct smc_wr_buf {
	u8	raw[SMC_WR_BUF_SIZE];
};

struct smc_wr_v2_buf {
	u8	raw[SMC_WR_BUF_V2_SIZE];
};

#define SMC_WR_REG_MR_WAIT_TIME	(5 * HZ) /* wait time for ib_wr_reg_mr result */

enum smc_wr_reg_state {
	POSTED,		/* ib_wr_reg_mr request posted */
	CONFIRMED,	/* ib_wr_reg_mr response: successful */
	FAILED		/* ib_wr_reg_mr response: failure */
};

struct smc_rdma_sge {				/* sges for RDMA writes */
	struct ib_sge	wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
};

#define SMC_MAX_RDMA_WRITES	2	/* max. # of RDMA writes per
					 * message send
					 */
struct smc_rdma_sges {				/* sges per message send */
	struct smc_rdma_sge	tx_rdma_sge[SMC_MAX_RDMA_WRITES];
};

struct smc_rdma_wr {				/* work requests per message
						 * send
						 */
	struct ib_rdma_wr	wr_tx_rdma[SMC_MAX_RDMA_WRITES];
};

#define SMC_LGR_ID_SIZE		4

struct smc_link {
	struct smc_ib_device	*smcibdev;	/* ib-device */
	u8			ibport;		/* port - values 1 | 2 */
	struct ib_pd		*roce_pd;	/* IB protection domain,
						 * unique for every RoCE QP
						 */
	struct ib_qp		*roce_qp;	/* IB queue pair */
	struct ib_qp_attr	qp_attr;	/* IB queue pair attributes */

	struct smc_wr_buf	*wr_tx_bufs;	/* WR send payload buffers */
	struct ib_send_wr	*wr_tx_ibs;	/* WR send meta data */
	struct ib_sge		*wr_tx_sges;	/* WR send gather meta data */
	struct smc_rdma_sges	*wr_tx_rdma_sges;/* RDMA WRITE gather meta data */
	struct smc_rdma_wr	*wr_tx_rdmas;	/* WR RDMA WRITE */
	struct smc_wr_tx_pend	*wr_tx_pends;	/* WR send waiting for CQE */
	struct completion	*wr_tx_compl;	/* WR send CQE completion */
	/* above vectors have wr_tx_cnt elements and use the same index */
	struct ib_send_wr	*wr_tx_v2_ib;	/* WR send v2 meta data */
	struct ib_sge		*wr_tx_v2_sge;	/* WR send v2 gather meta data */
	struct smc_wr_tx_pend	*wr_tx_v2_pend;	/* WR send v2 waiting for CQE */
	dma_addr_t		wr_tx_dma_addr;	/* DMA address of wr_tx_bufs */
	dma_addr_t		wr_tx_v2_dma_addr; /* DMA address of v2 tx buf */
	atomic_long_t		wr_tx_id;	/* seq # of last sent WR */
	unsigned long		*wr_tx_mask;	/* bit mask of used indexes */
	u32			wr_tx_cnt;	/* number of WR send buffers */
	wait_queue_head_t	wr_tx_wait;	/* wait for free WR send buf */
	atomic_t		wr_tx_refcnt;	/* tx refs to link */

	struct smc_wr_buf	*wr_rx_bufs;	/* WR recv payload buffers */
	struct ib_recv_wr	*wr_rx_ibs;	/* WR recv meta data */
	struct ib_sge		*wr_rx_sges;	/* WR recv scatter meta data */
	/* above three vectors have wr_rx_cnt elements and use the same index */
	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
	dma_addr_t		wr_rx_v2_dma_addr; /* DMA address of v2 rx buf */
	u64			wr_rx_id;	/* seq # of last recv WR */
	u32			wr_rx_cnt;	/* number of WR recv buffers */
	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */

	struct ib_reg_wr	wr_reg;		/* WR register memory region */
	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
	atomic_t		wr_reg_refcnt;	/* reg refs to link */
	enum smc_wr_reg_state	wr_reg_state;	/* state of wr_reg request */

	u8			gid[SMC_GID_SIZE]; /* gid matching used vlan id */
	u8			sgid_index;	/* gid index for vlan id */
	u32			peer_qpn;	/* QP number of peer */
	enum ib_mtu		path_mtu;	/* used mtu */
	enum ib_mtu		peer_mtu;	/* mtu size of peer */
	u32			psn_initial;	/* QP tx initial packet seqno */
	u32			peer_psn;	/* QP rx initial packet seqno */
	u8			peer_mac[ETH_ALEN];	/* = gid[8:10||13:15] */
	u8			peer_gid[SMC_GID_SIZE];	/* gid of peer */
	u8			link_id;	/* unique # within link group */
	u8			link_uid[SMC_LGR_ID_SIZE]; /* unique lnk id */
	u8			peer_link_uid[SMC_LGR_ID_SIZE]; /* peer uid */
	u8			link_idx;	/* index in lgr link array */
	u8			link_is_asym;	/* is link asymmetric? */
	struct smc_link_group	*lgr;		/* parent link group */
	struct work_struct	link_down_wrk;	/* wrk to bring link down */
	char			ibname[IB_DEVICE_NAME_MAX]; /* ib device name */
	int			ndev_ifidx;	/* network device ifindex */

	enum smc_link_state	state;		/* state of link */
	struct delayed_work	llc_testlink_wrk; /* testlink worker */
	struct completion	llc_testlink_resp; /* wait for rx of testlink */
	int			llc_testlink_time; /* testlink interval */
	atomic_t		conn_cnt;	/* connections on this link */
};

/* The SMC protocol allows up to 8 parallel links per link group; this
 * implementation currently provides SMC_LINKS_PER_LGR_MAX link slots.
 */
#define SMC_LINKS_PER_LGR_MAX	3
#define SMC_SINGLE_LINK		0

/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
struct smc_buf_desc {
	struct list_head	list;
	void			*cpu_addr;	/* virtual address of buffer */
	struct page		*pages;
	int			len;		/* length of buffer */
	u32			used;		/* currently used / unused */
	union {
		struct { /* SMC-R */
			struct sg_table	sgt[SMC_LINKS_PER_LGR_MAX];
					/* virtual buffer */
			struct ib_mr	*mr_rx[SMC_LINKS_PER_LGR_MAX];
					/* for rmb only: memory region
					 * incl. rkey provided to peer
					 */
			u32		order;	/* allocation order */

			u8		is_conf_rkey;
					/* confirm_rkey done */
			u8		is_reg_mr[SMC_LINKS_PER_LGR_MAX];
					/* mem region registered */
			u8		is_map_ib[SMC_LINKS_PER_LGR_MAX];
					/* mem region mapped to lnk */
			u8		is_reg_err;
					/* buffer registration err */
		};
		struct { /* SMC-D */
			unsigned short	sba_idx;
					/* SBA index number */
			u64		token;
					/* DMB token number */
			dma_addr_t	dma_addr;
					/* DMA address */
		};
	};
};

struct smc_rtoken {				/* address/key of remote RMB */
	u64			dma_addr;
	u32			rkey;
};

#define SMC_BUF_MIN_SIZE	16384	/* minimum size of an RMB */
#define SMC_RMBE_SIZES		16	/* number of distinct RMBE sizes */
/* theoretically, the RFC states that largest size would be 512K,
 * i.e. compressed 5 and thus 6 sizes (0..5), despite
 * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
 */
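/* Illustrative sketch of the compressed RMBE size encoding referred to above,
 * assuming the relation implemented by smc_uncompress_bufsize(), roughly
 * size = SMC_BUF_MIN_SIZE << compressed:
 *
 *	compressed 0 -> 16K (SMC_BUF_MIN_SIZE)
 *	compressed 1 -> 32K
 *	compressed 5 -> 512K (largest size mentioned by the RFC)
 */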
struct smcd_dev;

enum smc_lgr_type {				/* redundancy state of lgr */
	SMC_LGR_NONE,			/* no active links, lgr to be deleted */
	SMC_LGR_SINGLE,			/* 1 active RNIC on each peer */
	SMC_LGR_SYMMETRIC,		/* 2 active RNICs on each peer */
	SMC_LGR_ASYMMETRIC_PEER,	/* local has 2, peer 1 active RNICs */
	SMC_LGR_ASYMMETRIC_LOCAL,	/* local has 1, peer 2 active RNICs */
};

enum smc_llc_flowtype {
	SMC_LLC_FLOW_NONE	= 0,
	SMC_LLC_FLOW_ADD_LINK	= 2,
	SMC_LLC_FLOW_DEL_LINK	= 4,
	SMC_LLC_FLOW_REQ_ADD_LINK = 5,
	SMC_LLC_FLOW_RKEY	= 6,
};

struct smc_llc_qentry;

struct smc_llc_flow {
	enum smc_llc_flowtype type;
	struct smc_llc_qentry *qentry;
};

struct smc_link_group {
	struct list_head	list;
	struct rb_root		conns_all;	/* connection tree */
	rwlock_t		conns_lock;	/* protects conns_all */
	unsigned int		conns_num;	/* current # of connections */
	unsigned short		vlan_id;	/* vlan id of link group */

	struct list_head	sndbufs[SMC_RMBE_SIZES];/* tx buffers */
	struct mutex		sndbufs_lock;	/* protects tx buffers */
	struct list_head	rmbs[SMC_RMBE_SIZES];	/* rx buffers */
	struct mutex		rmbs_lock;	/* protects rx buffers */

	u8			id[SMC_LGR_ID_SIZE];	/* unique lgr id */
	struct delayed_work	free_work;	/* delayed freeing of an lgr */
	struct work_struct	terminate_work;	/* abnormal lgr termination */
	struct workqueue_struct	*tx_wq;		/* wq for conn. tx workers */
	u8			sync_err : 1;	/* lgr no longer fits to peer */
	u8			terminating : 1;/* lgr is terminating */
	u8			freeing : 1;	/* lgr is being freed */

	bool			is_smcd;	/* SMC-R or SMC-D */
	u8			smc_version;
	u8			negotiated_eid[SMC_MAX_EID_LEN];
	u8			peer_os;	/* peer operating system */
	u8			peer_smc_release;
	u8			peer_hostname[SMC_MAX_HOSTNAME_LEN];
	union {
		struct { /* SMC-R */
			enum smc_lgr_role	role;
						/* client or server */
			struct smc_link		lnk[SMC_LINKS_PER_LGR_MAX];
						/* smc link */
			struct smc_wr_v2_buf	*wr_rx_buf_v2;
						/* WR v2 recv payload buffer */
			struct smc_wr_v2_buf	*wr_tx_buf_v2;
						/* WR v2 send payload buffer */
			char			peer_systemid[SMC_SYSTEMID_LEN];
						/* unique system_id of peer */
			struct smc_rtoken	rtokens[SMC_RMBS_PER_LGR_MAX]
						[SMC_LINKS_PER_LGR_MAX];
						/* remote addr/key pairs */
			DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
						/* used rtoken elements */
			u8			next_link_id;
			enum smc_lgr_type	type;
						/* redundancy state */
			u8			pnet_id[SMC_MAX_PNETID_LEN + 1];
						/* pnet id of this lgr */
			struct list_head	llc_event_q;
						/* queue for llc events */
			spinlock_t		llc_event_q_lock;
						/* protects llc_event_q */
			struct mutex		llc_conf_mutex;
						/* protects lgr reconfig. */
			struct work_struct	llc_add_link_work;
			struct work_struct	llc_del_link_work;
			struct work_struct	llc_event_work;
						/* llc event worker */
			wait_queue_head_t	llc_flow_waiter;
						/* w4 next llc event */
			wait_queue_head_t	llc_msg_waiter;
						/* w4 next llc msg */
			struct smc_llc_flow	llc_flow_lcl;
						/* llc local control field */
			struct smc_llc_flow	llc_flow_rmt;
						/* llc remote control field */
			struct smc_llc_qentry	*delayed_event;
						/* arrived when flow active */
			spinlock_t		llc_flow_lock;
						/* protects llc flow */
			int			llc_testlink_time;
						/* link keep alive time */
			u32			llc_termination_rsn;
						/* rsn code for termination */
			u8			nexthop_mac[ETH_ALEN];
			u8			uses_gateway;
			__be32			saddr;
						/* net namespace */
			struct net		*net;
		};
		struct { /* SMC-D */
			u64			peer_gid;
						/* Peer GID (remote) */
			struct smcd_dev		*smcd;
						/* ISM device for VLAN reg. */
			u8			peer_shutdown : 1;
						/* peer triggered shutdown */
		};
	};
};

struct smc_clc_msg_local;

#define GID_LIST_SIZE	2

struct smc_gidlist {
	u8			len;
	u8			list[GID_LIST_SIZE][SMC_GID_SIZE];
};

struct smc_init_info_smcrv2 {
	/* Input fields */
	__be32			saddr;
	struct sock		*clc_sk;
	__be32			daddr;

	/* Output fields when saddr is set */
	struct smc_ib_device	*ib_dev_v2;
	u8			ib_port_v2;
	u8			ib_gid_v2[SMC_GID_SIZE];

	/* Additional output fields when clc_sk and daddr are set as well */
	u8			uses_gateway;
	u8			nexthop_mac[ETH_ALEN];

	struct smc_gidlist	gidlist;
};

struct smc_init_info {
	u8			is_smcd;
	u8			smc_type_v1;
	u8			smc_type_v2;
	u8			first_contact_peer;
	u8			first_contact_local;
	unsigned short		vlan_id;
	u32			rc;
	u8			negotiated_eid[SMC_MAX_EID_LEN];
	/* SMC-R */
	u8			smcr_version;
	u8			check_smcrv2;
	u8			peer_gid[SMC_GID_SIZE];
	u8			peer_mac[ETH_ALEN];
	u8			peer_systemid[SMC_SYSTEMID_LEN];
	struct smc_ib_device	*ib_dev;
	u8			ib_gid[SMC_GID_SIZE];
	u8			ib_port;
	u32			ib_clcqpn;
	struct smc_init_info_smcrv2 smcrv2;
	/* SMC-D */
	u64			ism_peer_gid[SMC_MAX_ISM_DEVS + 1];
	struct smcd_dev		*ism_dev[SMC_MAX_ISM_DEVS + 1];
	u16			ism_chid[SMC_MAX_ISM_DEVS + 1];
	u8			ism_offered_cnt; /* # of ISM devices offered */
	u8			ism_selected;    /* index of selected ISM dev */
	u8			smcd_version;
};

/* Find the connection associated with the given alert token in the link group.
 * To use rbtrees we have to implement our own search core.
 * Requires @conns_lock
 * @token	alert token to search for
 * @lgr		link group to search in
 * Returns connection associated with token if found, NULL otherwise.
 */
static inline struct smc_connection *smc_lgr_find_conn(
	u32 token, struct smc_link_group *lgr)
{
	struct smc_connection *res = NULL;
	struct rb_node *node;

	node = lgr->conns_all.rb_node;
	while (node) {
		struct smc_connection *cur = rb_entry(node,
					struct smc_connection, alert_node);

		if (cur->alert_token_local > token) {
			node = node->rb_left;
		} else {
			if (cur->alert_token_local < token) {
				node = node->rb_right;
			} else {
				res = cur;
				break;
			}
		}
	}

	return res;
}
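/* Usage sketch (illustrative, not a specific caller): the rbtree walk above
 * does not take conns_lock itself, so a lookup by alert token is expected to
 * be wrapped by the caller, e.g.:
 *
 *	read_lock_bh(&lgr->conns_lock);
 *	conn = smc_lgr_find_conn(token, lgr);
 *	read_unlock_bh(&lgr->conns_lock);
 */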
/*
 * Returns true if the specified link is usable.
 *
 * usable means the link is ready to receive RDMA messages, map memory
 * on the link, etc. This doesn't ensure we are able to send RDMA messages
 * on this link; if sending RDMA messages is needed, use smc_link_sendable().
 */
static inline bool smc_link_usable(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE)
		return false;
	return true;
}

/*
 * Returns true if the specified link is ready to receive AND send RDMA
 * messages.
 *
 * For the client side in first contact, the underlying QP may still be in
 * RESET or RTR while the link state is ACTIVATING, so the check in
 * smc_link_usable() is not strong enough. For those places that need to send
 * any CDC or LLC messages, use smc_link_sendable(); otherwise, use
 * smc_link_usable() instead.
 */
static inline bool smc_link_sendable(struct smc_link *lnk)
{
	return smc_link_usable(lnk) &&
		lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
}
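/* Usage sketch (illustrative): pick the helper matching the operation.
 * Mapping or registering memory only needs a usable link, while posting
 * LLC/CDC sends additionally requires the QP to be in RTS:
 *
 *	if (smc_link_usable(lnk))
 *		... map / register buffers on lnk ...
 *
 *	if (smc_link_sendable(lnk))
 *		... post an LLC or CDC message on lnk ...
 */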
static inline bool smc_link_active(struct smc_link *lnk)
{
	return lnk->state == SMC_LNK_ACTIVE;
}

static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
	sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
		be16_to_cpu(((__be16 *)gid_raw)[0]),
		be16_to_cpu(((__be16 *)gid_raw)[1]),
		be16_to_cpu(((__be16 *)gid_raw)[2]),
		be16_to_cpu(((__be16 *)gid_raw)[3]),
		be16_to_cpu(((__be16 *)gid_raw)[4]),
		be16_to_cpu(((__be16 *)gid_raw)[5]),
		be16_to_cpu(((__be16 *)gid_raw)[6]),
		be16_to_cpu(((__be16 *)gid_raw)[7]));
}

struct smc_pci_dev {
	__u32		pci_fid;
	__u16		pci_pchid;
	__u16		pci_vendor;
	__u16		pci_device;
	__u8		pci_id[SMC_PCI_ID_STR_LEN];
};

static inline void smc_set_pci_values(struct pci_dev *pci_dev,
				      struct smc_pci_dev *smc_dev)
{
	smc_dev->pci_vendor = pci_dev->vendor;
	smc_dev->pci_device = pci_dev->device;
	snprintf(smc_dev->pci_id, sizeof(smc_dev->pci_id), "%s",
		 pci_name(pci_dev));
#if IS_ENABLED(CONFIG_S390)
	{ /* Set s390 specific PCI information */
		struct zpci_dev *zdev;

		zdev = to_zpci(pci_dev);
		smc_dev->pci_fid = zdev->fid;
		smc_dev->pci_pchid = zdev->pchid;
	}
#endif
}

struct smc_sock;
struct smc_clc_msg_accept_confirm;

void smc_lgr_cleanup_early(struct smc_link_group *lgr);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
			unsigned short vlan);
void smc_smcd_terminate_all(struct smcd_dev *dev);
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link,
			    struct smc_clc_msg_accept_confirm *clc);
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey);
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey);
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey);
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);

void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
int smc_core_init(void);
void smc_core_exit(void);

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini);
void smcr_link_clear(struct smc_link *lnk, bool log);
void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk);
int smcr_buf_map_lgr(struct smc_link *lnk);
int smcr_buf_reg_lgr(struct smc_link *lnk);
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx);
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc);
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err);
void smcr_link_down_cond(struct smc_link *lnk);
void smcr_link_down_cond_sched(struct smc_link *lnk);
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb);
int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);

static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
{
	return link->lgr;
}
#endif