1932309fbSMichal Wajdeczko // SPDX-License-Identifier: MIT
2932309fbSMichal Wajdeczko /*
3932309fbSMichal Wajdeczko * Copyright © 2018 Intel Corporation
4932309fbSMichal Wajdeczko */
5932309fbSMichal Wajdeczko
63da3c5c1SChris Wilson #include <linux/crc32.h>
73da3c5c1SChris Wilson
83da3c5c1SChris Wilson #include "gem/i915_gem_stolen.h"
93da3c5c1SChris Wilson
103da3c5c1SChris Wilson #include "i915_memcpy.h"
11932309fbSMichal Wajdeczko #include "i915_selftest.h"
1245233ab2SChris Wilson #include "intel_gpu_commands.h"
13932309fbSMichal Wajdeczko #include "selftests/igt_reset.h"
14f6470c9bSMichal Wajdeczko #include "selftests/igt_atomic.h"
153da3c5c1SChris Wilson #include "selftests/igt_spinner.h"
163da3c5c1SChris Wilson
/*
 * Check whether a GPU reset corrupts the contents of stolen memory.
 *
 * CRC every page of the stolen region (DSM), keep the engines selected by
 * @mask busy with spinner requests, perform the reset, then CRC every page
 * again.  Pages not allocated from i915->mm.stolen are filled with a known
 * pattern beforehand so any write by the reset shows up as a CRC mismatch.
 * Clobbers in the unreserved area (at or above I915_GEM_STOLEN_BIAS) are a
 * hard failure (-EINVAL); clobbers below are only logged.
 *
 * @gt:   the GT to reset
 * @mask: engines to reset (ALL_ENGINES requests a device-level reset)
 * @msg:  label used in log messages (e.g. "device" or an engine name)
 *
 * Returns 0 on success or a negative errno on setup failure / clobber.
 */
static int
__igt_reset_stolen(struct intel_gt *gt,
		   intel_engine_mask_t mask,
		   const char *msg)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const struct resource *dsm = &gt->i915->dsm.stolen;
	resource_size_t num_pages, page;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct igt_spinner spin;
	long max, count;
	void *tmp;
	u32 *crc;
	int err;

	/*
	 * We reuse the error-capture GGTT slot to map stolen pages one at a
	 * time; without it there is no window into stolen memory.
	 */
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return 0;

	num_pages = resource_size(dsm) >> PAGE_SHIFT;
	if (!num_pages)
		return 0;

	/* One CRC per stolen page, recorded before and compared after reset */
	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	/* Bounce buffer for reading back the WC mapping a page at a time */
	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto err_crc;
	}

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = igt_spinner_init(&spin, gt);
	if (err)
		goto err_lock;

	/*
	 * Submit a spinning request on each selected engine so the reset
	 * interrupts live context state rather than an idle GPU.
	 */
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!(mask & engine->mask))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_spin;
		}
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_spin;
		}
		i915_request_add(rq);
	}

	/* Pass 1: record a CRC of every stolen page before the reset */
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;

		/* Map this stolen page through the error-capture GGTT slot */
		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(gt->i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		/*
		 * Pages with no allocation in the stolen drm_mm are "unused";
		 * stamp them with a known pattern so a post-reset write is
		 * guaranteed to change the CRC.
		 */
		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1))
			memset_io(s, STACK_MAGIC, PAGE_SIZE);

		/*
		 * Prefer the accelerated WC read into tmp; if unavailable,
		 * CRC straight from the uncached mapping.
		 */
		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		crc[page] = crc32_le(0, in, PAGE_SIZE);

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	/* Device-wide reset for ALL_ENGINES, else per-engine resets */
	if (mask == ALL_ENGINES) {
		intel_gt_reset(gt, mask, NULL);
	} else {
		for_each_engine(engine, gt, id) {
			if (mask & engine->mask)
				intel_engine_reset(engine, NULL);
		}
	}

	/* Pass 2: re-CRC each page and compare against the pre-reset value */
	max = -1;
	count = 0;
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;
		u32 x;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(gt->i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		x = crc32_le(0, in, PAGE_SIZE);

		/* Only unused pages are expected to survive the reset intact */
		if (x != crc[page] &&
		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1)) {
			pr_debug("unused stolen page %pa modified by GPU reset\n",
				 &page);
			/* Dump only the first clobbered page to limit noise */
			if (count++ == 0)
				igt_hexdump(in, PAGE_SIZE);
			max = page;
		}

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (count > 0) {
		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
			msg, count, max);
	}
	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
		/*
		 * Writes above the BIAS hit stolen memory that is not
		 * reserved for reset scribbling — treat as a real failure.
		 */
		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
		       msg, I915_GEM_STOLEN_BIAS);
		err = -EINVAL;
	}

err_spin:
	igt_spinner_fini(&spin);

err_lock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	kfree(tmp);
err_crc:
	kfree(crc);
	return err;
}
1843da3c5c1SChris Wilson
igt_reset_device_stolen(void * arg)1853da3c5c1SChris Wilson static int igt_reset_device_stolen(void *arg)
1863da3c5c1SChris Wilson {
1873da3c5c1SChris Wilson return __igt_reset_stolen(arg, ALL_ENGINES, "device");
1883da3c5c1SChris Wilson }
1893da3c5c1SChris Wilson
igt_reset_engines_stolen(void * arg)1903da3c5c1SChris Wilson static int igt_reset_engines_stolen(void *arg)
1913da3c5c1SChris Wilson {
1923da3c5c1SChris Wilson struct intel_gt *gt = arg;
1933da3c5c1SChris Wilson struct intel_engine_cs *engine;
1943da3c5c1SChris Wilson enum intel_engine_id id;
1953da3c5c1SChris Wilson int err;
1963da3c5c1SChris Wilson
1973da3c5c1SChris Wilson if (!intel_has_reset_engine(gt))
1983da3c5c1SChris Wilson return 0;
1993da3c5c1SChris Wilson
2003da3c5c1SChris Wilson for_each_engine(engine, gt, id) {
2013da3c5c1SChris Wilson err = __igt_reset_stolen(gt, engine->mask, engine->name);
2023da3c5c1SChris Wilson if (err)
2033da3c5c1SChris Wilson return err;
2043da3c5c1SChris Wilson }
2053da3c5c1SChris Wilson
2063da3c5c1SChris Wilson return 0;
2073da3c5c1SChris Wilson }
208932309fbSMichal Wajdeczko
igt_global_reset(void * arg)209932309fbSMichal Wajdeczko static int igt_global_reset(void *arg)
210932309fbSMichal Wajdeczko {
211cb823ed9SChris Wilson struct intel_gt *gt = arg;
212932309fbSMichal Wajdeczko unsigned int reset_count;
213cb823ed9SChris Wilson intel_wakeref_t wakeref;
214932309fbSMichal Wajdeczko int err = 0;
215932309fbSMichal Wajdeczko
216932309fbSMichal Wajdeczko /* Check that we can issue a global GPU reset */
217932309fbSMichal Wajdeczko
218cb823ed9SChris Wilson igt_global_reset_lock(gt);
219cd6a8513SChris Wilson wakeref = intel_runtime_pm_get(gt->uncore->rpm);
220932309fbSMichal Wajdeczko
221cb823ed9SChris Wilson reset_count = i915_reset_count(>->i915->gpu_error);
222932309fbSMichal Wajdeczko
223cb823ed9SChris Wilson intel_gt_reset(gt, ALL_ENGINES, NULL);
224932309fbSMichal Wajdeczko
225cb823ed9SChris Wilson if (i915_reset_count(>->i915->gpu_error) == reset_count) {
226932309fbSMichal Wajdeczko pr_err("No GPU reset recorded!\n");
227932309fbSMichal Wajdeczko err = -EINVAL;
228932309fbSMichal Wajdeczko }
229932309fbSMichal Wajdeczko
230cd6a8513SChris Wilson intel_runtime_pm_put(gt->uncore->rpm, wakeref);
231cb823ed9SChris Wilson igt_global_reset_unlock(gt);
232932309fbSMichal Wajdeczko
233cb823ed9SChris Wilson if (intel_gt_is_wedged(gt))
234932309fbSMichal Wajdeczko err = -EIO;
235932309fbSMichal Wajdeczko
236932309fbSMichal Wajdeczko return err;
237932309fbSMichal Wajdeczko }
238932309fbSMichal Wajdeczko
igt_wedged_reset(void * arg)239932309fbSMichal Wajdeczko static int igt_wedged_reset(void *arg)
240932309fbSMichal Wajdeczko {
241cb823ed9SChris Wilson struct intel_gt *gt = arg;
242932309fbSMichal Wajdeczko intel_wakeref_t wakeref;
243932309fbSMichal Wajdeczko
244932309fbSMichal Wajdeczko /* Check that we can recover a wedged device with a GPU reset */
245932309fbSMichal Wajdeczko
246cb823ed9SChris Wilson igt_global_reset_lock(gt);
247cd6a8513SChris Wilson wakeref = intel_runtime_pm_get(gt->uncore->rpm);
248932309fbSMichal Wajdeczko
249cb823ed9SChris Wilson intel_gt_set_wedged(gt);
250932309fbSMichal Wajdeczko
251cb823ed9SChris Wilson GEM_BUG_ON(!intel_gt_is_wedged(gt));
252cb823ed9SChris Wilson intel_gt_reset(gt, ALL_ENGINES, NULL);
253932309fbSMichal Wajdeczko
254cd6a8513SChris Wilson intel_runtime_pm_put(gt->uncore->rpm, wakeref);
255cb823ed9SChris Wilson igt_global_reset_unlock(gt);
256932309fbSMichal Wajdeczko
257cb823ed9SChris Wilson return intel_gt_is_wedged(gt) ? -EIO : 0;
258932309fbSMichal Wajdeczko }
259932309fbSMichal Wajdeczko
static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	/*
	 * Attempt a full GT reset inside each atomic phase (irq-off,
	 * softirq, preempt-off, ...) provided by igt_atomic_phases.
	 */
	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		/*
		 * reset_prepare()/reset_finish() must bracket the reset,
		 * outside the critical section, as only __intel_gt_reset()
		 * itself is expected to be atomic-safe.
		 */
		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = __intel_gt_reset(gt, ALL_ENGINES);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}
303f6470c9bSMichal Wajdeczko
static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	/* Skip when GuC owns submission; engine resets go through GuC then */
	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
		struct tasklet_struct *t = &engine->sched_engine->tasklet;

		/*
		 * Pause the submission tasklet (if in use) so it cannot run
		 * concurrently with the engine reset we are about to inject.
		 */
		if (t->func)
			tasklet_disable(t);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);
			/*
			 * __intel_engine_reset_bh() expects bottom halves to
			 * be disabled; the "softirq" phase already provides
			 * that via its critical section, every other phase
			 * needs an explicit local_bh_disable() here.
			 */
			if (strcmp(p->name, "softirq"))
				local_bh_disable();

			p->critical_section_begin();
			err = __intel_engine_reset_bh(engine, NULL);
			p->critical_section_end();

			if (strcmp(p->name, "softirq"))
				local_bh_enable();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		if (t->func) {
			/* Resume the tasklet and kick it to catch up */
			tasklet_enable(t);
			tasklet_hi_schedule(t);
		}
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}
372faaa2902SChris Wilson
intel_reset_live_selftests(struct drm_i915_private * i915)373932309fbSMichal Wajdeczko int intel_reset_live_selftests(struct drm_i915_private *i915)
374932309fbSMichal Wajdeczko {
375932309fbSMichal Wajdeczko static const struct i915_subtest tests[] = {
376932309fbSMichal Wajdeczko SUBTEST(igt_global_reset), /* attempt to recover GPU first */
3773da3c5c1SChris Wilson SUBTEST(igt_reset_device_stolen),
3783da3c5c1SChris Wilson SUBTEST(igt_reset_engines_stolen),
379932309fbSMichal Wajdeczko SUBTEST(igt_wedged_reset),
380f6470c9bSMichal Wajdeczko SUBTEST(igt_atomic_reset),
381faaa2902SChris Wilson SUBTEST(igt_atomic_engine_reset),
382932309fbSMichal Wajdeczko };
383c14adcbdSMichał Winiarski struct intel_gt *gt = to_gt(i915);
384932309fbSMichal Wajdeczko
385260e6b71SChris Wilson if (!intel_has_gpu_reset(gt))
386932309fbSMichal Wajdeczko return 0;
387932309fbSMichal Wajdeczko
388cb823ed9SChris Wilson if (intel_gt_is_wedged(gt))
389932309fbSMichal Wajdeczko return -EIO; /* we're long past hope of a successful reset */
390932309fbSMichal Wajdeczko
391cb823ed9SChris Wilson return intel_gt_live_subtests(tests, gt);
392932309fbSMichal Wajdeczko }
393