#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */

struct trap_list {
	u32 list_len;
	struct list_head list;
};

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per-CPU counters to avoid cacheline trading on update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv; /* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt. However,
	 * rdmavt will need access to it as well, so drivers must provide it
	 * during the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;

	/*
	 * Keep a list of traps that have not been repressed.  They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};
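
/*
 * Example (illustrative sketch, not part of the rdmavt API): the driver
 * allocates its pkey table and hands it to rdmavt when the port is
 * attached via rvt_init_port(), declared at the bottom of this file.
 * The mydrv_* names are hypothetical.
 *
 *	u16 *pkeys = kcalloc(rdi->dparms.npkeys, sizeof(*pkeys),
 *			     GFP_KERNEL);
 *
 *	if (!pkeys)
 *		return -ENOMEM;
 *	pkeys[0] = 0xffff;	// full-membership default pkey
 *	ret = rvt_init_port(rdi, &mydrv_port->rvp, port_index, pkeys);
 */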

#define RVT_CQN_MAX 16 /* maximum length of cq name */

#define RVT_SGE_COPY_MEMCPY	0
#define RVT_SGE_COPY_CACHELESS	1
#define RVT_SGE_COPY_ADAPTIVE	2

/*
 * Things that are driver specific: module parameters in hfi1 and qib.
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props, such as
	 * special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	unsigned int sge_copy_mode;
	unsigned int wss_threshold;
	unsigned int wss_clean_period;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 extra_rdma_atomic;
	u8 reserved_operations;
};
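
/*
 * Example (illustrative sketch): a driver typically fills dparms once,
 * before registering with rdmavt. The values below are made up for
 * illustration only.
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.qpn_start = 0;
 *	rdi->dparms.qpn_inc = 1;
 *	rdi->dparms.nports = 1;
 *	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;
 *	rdi->dparms.node = NUMA_NO_NODE;
 *	rdi->dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
 */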

/* User context */
struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

/* memory working set size */
struct rvt_wss {
	unsigned long *entries;
	atomic_t total_count;
	atomic_t clean_counter;
	atomic_t clean_entry;

	int threshold;
	int num_entries;
	long pages_mask;
	unsigned int clean_period;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver a notice that there is send work to do. It is up to
	 * the driver to generally push the packets out; this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires the s_lock not to be held. The other assumes the
	 * s_lock is held.
	 */
	bool (*schedule_send)(struct rvt_qp *qp);
	bool (*schedule_send_no_lock)(struct rvt_qp *qp);

	/*
	 * Driver specific work request setup and checking.
	 * This function is allowed to perform any setup, checks, or
	 * adjustments required to the SWQE in order to be usable by
	 * underlying protocols. This includes private data structure
	 * allocations.
	 */
	int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
			 bool *call_send);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this callback.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt.  Errors are returned via
	 * ERR_PTR(err).  The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Initialize a structure allocated with qp_priv_alloc(). This should
	 * be called after all qp fields have been initialized in rdmavt.
	 */
	int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			    struct ib_qp_init_attr *init_attr);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver the particular qp in question has been reset so
	 * that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests that
	 * are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in-progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);
	/*
	 * Convert an mtu to a path mtu
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shut down a port
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use of
	 * these is checked for NULL before blindly calling. Rdmavt should also
	 * be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num);

	/* Determine if it's safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver-specific QP modification/notification */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);

	/* Get and return CPU to pin CQ processing thread */
	int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
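
/*
 * Example (illustrative sketch): a minimal driver wires up its
 * calldowns before calling rvt_register_device(). Exactly which
 * entries are mandatory depends on which verbs rdmavt provides for
 * the driver; see the comment at the top of this struct. The
 * mydrv_*() functions are hypothetical.
 *
 *	rdi->driver_f.qp_priv_alloc = mydrv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = mydrv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = mydrv_notify_qp_reset;
 *	rdi->driver_f.schedule_send = mydrv_schedule_send;
 *	rdi->driver_f.do_send = mydrv_do_send;
 *	rdi->driver_f.get_pmtu_from_attr = mydrv_get_pmtu_from_attr;
 *	rdi->driver_f.flush_qp_waiters = mydrv_flush_qp_waiters;
 *	rdi->driver_f.stop_send_queue = mydrv_stop_send_queue;
 */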

struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible for
	 * allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it would
	 * want values reported to a ULP. This will be returned to the caller
	 * in rdmavt's device. The driver should also therefore refrain from
	 * modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* opcode translation table */
	const enum ib_wc_opcode *wc_opcode;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock; /* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	/* Memory Working Set Size */
	struct rvt_wss *wss;
};
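
/*
 * Example (illustrative sketch): overall registration flow for a
 * driver embedding rvt_dev_info at the start of its own device
 * structure. struct mydrv_dev and RDMA_DRIVER_MYDRV are hypothetical;
 * a real driver passes its own rdma_driver_id (e.g. RDMA_DRIVER_HFI1).
 *
 *	struct mydrv_dev {
 *		struct rvt_dev_info rdi;	// must remain first
 *		// driver private state follows
 *	};
 *
 *	rdi = rvt_alloc_device(sizeof(struct mydrv_dev), nports);
 *	// fill in rdi->dparms and rdi->driver_f, init ports...
 *	ret = rvt_register_device(rdi, RDMA_DRIVER_MYDRV);
 *	if (ret)
 *		rvt_dealloc_device(rdi);
 */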

/**
 * rvt_set_ibdev_name - Craft an IB device name from client info
 * @rdi: pointer to the client rvt_dev_info structure
 * @fmt: format string used to build the name from @name and @unit
 * @name: client specific name
 * @unit: client specific unit number
 */
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
				      const char *fmt, const char *name,
				      const int unit)
{
	/*
	 * FIXME: rvt and its users want to touch the ibdev before
	 * registration and have things like the name work. We don't have the
	 * infrastructure in the core to support this directly today, hack it
	 * to work by setting the name manually here.
	 */
	dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
	strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
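
/*
 * Example (illustrative sketch): with a client base name of "mydrv"
 * (hypothetical) and unit 0, this produces an ibdev name of "mydrv0":
 *
 *	rvt_set_ibdev_name(rdi, "%s%d", "mydrv", unit);
 */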

/**
 * rvt_get_ibdev_name - return the IB name
 * @rdi: rdmavt device
 *
 * Return the registered name of the device.
 */
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
	return dev_name(&rdi->ibdev.dev);
}

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have the same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic +
		rdi->dparms.extra_rdma_atomic + 1;
}

static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic +
		rdi->dparms.extra_rdma_atomic;
}
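
/*
 * For example, with dparms.max_rdma_atomic = 16 and extra_rdma_atomic = 0,
 * rvt_size_atomic() yields 16 while rvt_max_atomic() yields 17, one more
 * than the number of atomic operations the QP must track.
 */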

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: rdmavt device
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
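
/*
 * Example (illustrative sketch): callers bracket the lookup and all use
 * of the returned qp with RCU; mydrv_handle_packet() is hypothetical.
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		mydrv_handle_packet(qp);
 *	rcu_read_unlock();
 */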

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);

#endif          /* DEF_RDMA_VT_H */