xref: /openbmc/linux/kernel/stop_machine.c (revision 4e95bc26)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible CPU, enabled on online CPUs */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by letting the stopper thread preempt us here:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
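
/*
 * Example usage (an illustrative sketch, not part of this file; the callback,
 * its argument and the per-CPU pointer are hypothetical):
 *
 *	static int flush_local_state(void *arg)
 *	{
 *		struct my_pcpu_state *st = arg;
 *
 *		st->flushed = true;	// runs on the target CPU, preemption
 *					// off, interrupts still enabled
 *		return 0;
 *	}
 *
 *	// Caller may sleep.  -ENOENT means @cpu was offline and the
 *	// callback never ran.
 *	err = stop_one_cpu(cpu, flush_local_state, per_cpu_ptr(&my_state, cpu));
 */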

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function. */
	MULTI_STOP_RUN,
	/* Exit. */
	MULTI_STOP_EXIT,
};
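
/*
 * All stopper threads taking part in a multi-CPU stop step through these
 * states in lock step: the last thread to ack a state advances everyone to
 * the next one (see ack_state()), i.e.
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * so by the time any CPU executes the MULTI_STOP_RUN work, every
 * participating CPU has interrupts disabled.
 */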

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/*
	 * Like num_online_cpus(), but CPU hotplug itself uses us, so the
	 * thread count has to be supplied explicitly.
	 */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  We would then never get around to waking
	 * up the other stopper.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress: if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not yet on cpu2, we hold both locks and
	 * are guaranteed to observe the flag.
	 *
	 * The flag can be falsely true, but it is safe to spin until it is
	 * cleared: queue_stop_cpus_work() does everything under
	 * preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1 while both CPUs are
 * held in the stop state.  This function returns after both stops have
 * completed.
 *
 * RETURNS:
 * -ENOENT if either cpu was offline; otherwise, the return value of @fn.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
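
/*
 * Example usage (an illustrative sketch; the callback and its argument are
 * hypothetical).  A typical caller is the scheduler's task-swap path (e.g.
 * migrate_swap()), which needs both CPUs quiescent at once:
 *
 *	static int swap_pair(void *arg)
 *	{
 *		struct swap_args *sa = arg;
 *
 *		sa->swapped = true;	// runs on src_cpu with IRQs disabled
 *					// while dst_cpu spins in multi_cpu_stop()
 *		return 0;
 *	}
 *
 *	err = stop_two_cpus(src_cpu, dst_cpu, swap_pair, &args);
 */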

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
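
/*
 * Example usage (an illustrative sketch; all names are hypothetical).
 * Because the caller does not wait, @work_buf must stay valid until the
 * stopper has started running @fn - a static or per-CPU buffer is typical:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	static int kick_fn(void *arg)
 *	{
 *		return 0;	// runs on the target CPU in stopper context
 *	}
 *
 *	// Safe from atomic context; returns false if @cpu's stopper is
 *	// not enabled (CPU offline), in which case kick_fn() never runs.
 *	stop_one_cpu_nowait(cpu, kick_fn, NULL, per_cpu_ptr(&kick_work, cpu));
 */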

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on them may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or a non-zero value if any of them returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
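
/*
 * Example usage (an illustrative sketch; the callback and the mask are
 * hypothetical):
 *
 *	static int drain_pcpu_caches(void *arg)
 *	{
 *		return 0;	// runs on each online CPU in @mask, in
 *				// stopper context with preemption off
 *	}
 *
 *	// May sleep.  Offline CPUs in @mask are skipped; -ENOENT is
 *	// returned only if none of them were online.
 *	err = stop_cpus(mask, drain_pcpu_caches, NULL);
 */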

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or a
 * non-zero value if any of them returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
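
/*
 * Example usage (an illustrative sketch, reusing the hypothetical callback
 * from the stop_cpus() example above).  A busy facility is reported rather
 * than waited for, so callers back off or retry:
 *
 *	err = try_stop_cpus(cpu_online_mask, drain_pcpu_caches, NULL);
 *	if (err == -EAGAIN)
 *		return err;	// someone else is stopping cpus right now
 */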

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks; until then it is fine to queue
	 * new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper threads have
		 * been set up.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
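
/*
 * Example usage (an illustrative sketch; the callback and its argument are
 * hypothetical).  While @fn runs, every other online CPU spins in
 * multi_cpu_stop() with interrupts disabled, so the system is quiescent:
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct my_patch *p = arg;
 *
 *		p->applied = true;	// no other CPU executes code or
 *					// takes interrupts at this point
 *		return 0;
 *	}
 *
 *	// May sleep; takes cpus_read_lock() internally.  A NULL mask means
 *	// @fn runs on just one (any) online CPU.
 *	err = stop_machine(apply_patch, &patch, NULL);
 */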

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such a state by
 * busy-waiting for synchronization and executing @fn directly on the local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or a non-zero value if any of
 * them returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

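/*
 * Example usage (an illustrative sketch; the callback is hypothetical).
 * This is meant for a CPU that is being brought up and is not yet active,
 * e.g. to rewrite per-CPU hardware state in sync with the rest of the
 * system:
 *
 *	static int sync_hw_state(void *arg)
 *	{
 *		return 0;	// with @cpus == NULL this runs on one CPU
 *				// while all others spin with IRQs disabled
 *	}
 *
 *	// Must not sleep: the mutex and the completion are busy-waited on.
 *	err = stop_machine_from_inactive_cpu(sync_hw_state, NULL, NULL);
 */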