/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}
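/*
 * Worked example (illustrative only, not part of the kernel API): with
 * RCU_SEQ_CTR_SHIFT == 2, the low two bits hold state and the upper bits
 * hold the grace-period counter, so a counter named gp_seq evolves thus:
 *
 *	gp_seq == 0x8				// ctr 2, state 0: idle
 *	rcu_seq_snap(&gp_seq) == 0xc		// ctr 3: next GP suffices
 *	rcu_seq_start(&gp_seq)			// gp_seq == 0x9: GP in flight
 *	rcu_seq_snap(&gp_seq) == 0x10		// ctr 4: the in-flight GP may
 *						// have missed us, so wait for
 *						// the one after it
 *	rcu_seq_end(&gp_seq)			// gp_seq == 0xc: ctr 3, idle
 */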
/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either new or old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
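/*
 * Usage sketch (illustrative, with "gpsp" standing in for a pointer to
 * the update side's sequence number): the snapshot/done pair implements
 * the canonical grace-period wait:
 *
 *	unsigned long s = rcu_seq_snap(gpsp);
 *
 *	// ... ensure a grace period gets started ...
 *	while (!rcu_seq_done(gpsp, s))
 *		schedule_timeout_uninterruptible(1);	// or a real waitqueue
 *	// A full grace period has elapsed since the snapshot, so any
 *	// callback registered before it may now be invoked.
 */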
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  They are declared here because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);
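/*
 * Pairing sketch (illustrative, simplified from what call_rcu() and the
 * callback-invocation path actually do):
 *
 *	if (debug_rcu_head_queue(head)) {
 *		// Probable double call_rcu(): complain and leak rather
 *		// than enqueue the rcu_head twice.
 *		return;
 *	}
 *	// ... enqueue head on the callback list ...
 *
 *	// Much later, just before invoking the callback:
 *	debug_rcu_head_unqueue(head);
 *	head->func(head);
 */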
/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
extern bool rcu_fanout_exact;
extern int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
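/*
 * Worked example (illustrative): with rcu_fanout_exact == 0,
 * nr_cpu_ids == 96, rcu_num_lvls == 2, and levelcnt == { 1, 6 }
 * (one root rcu_node over six leaves), the balancing loop yields:
 *
 *	i == 1: levelspread[1] = (96 + 6 - 1) / 6 = 16	// CPUs per leaf
 *	i == 0: levelspread[0] = (6 + 1 - 1) / 1 = 6	// leaves under root
 */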
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags);	\
} while (0)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define raw_lockdep_assert_held_rcu_node(p)				\
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */
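/*
 * Usage sketch (illustrative): the wrappers are drop-in replacements for
 * the corresponding raw_spin_lock*() operations on an rcu_node "rnp":
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ... rnp updates here are fully ordered against the previous
 *	// critical section for this lock, even one on another level ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */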
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif
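/*
 * Usage sketch (illustrative): a torture test samples grace-period
 * progress for its flavor without poking into RCU internals:
 *
 *	int flags;
 *	unsigned long gp_seq;
 *
 *	rcutorture_get_gp_data(RCU_FLAVOR, &flags, &gp_seq);
 *	// The !CONFIG_TREE_RCU stub above simply reports zeroes.
 */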
#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */