#!/usr/bin/env bash
# group: rw
#
# Test case for qcow2's handling of extra data in snapshot table entries
# (and more generally, how certain cases of broken snapshot tables are
# handled)
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

# creator
owner=hreitz@redhat.com

seq=$(basename $0)
echo "QA output created by $seq"

status=1	# failure is the default!

_cleanup()
{
    _cleanup_test_img
    rm -f "$TEST_IMG".v{2,3}.orig
    rm -f "$TEST_DIR"/sn{0,1,2}{,-pre,-extra,-post}
}
trap "_cleanup; exit \$status" 0 1 2 3 15

# get standard environment, filters and checks
. ./common.rc
. ./common.filter

# This tests qcow2-specific low-level functionality
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
# (1) We create a v2 image that supports nothing but refcount_bits=16
# (2) We do some refcount management on our own which expects
#     refcount_bits=16
# As for data files, they do not support snapshots at all.
_unsupported_imgopts 'refcount_bits=\([^1]\|.\([^6]\|$\)\)' data_file

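# A snapshot table entry starts with a 40-byte fixed part (per the qcow2
# specification):
#   byte  0 -  7: offset of the snapshot's L1 table
#         8 - 11: number of entries in that L1 table
#        12 - 13: length of the snapshot ID string
#        14 - 15: length of the snapshot name
#        16 - 35: timestamps, VM clock, 32-bit VM state size
#        36 - 39: size of the extra data that follows
# It is followed by the extra data, the ID string and the name, and the
# whole entry is padded to a multiple of 8 bytes.
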
# Parameters:
#   $1: image filename
#   $2: snapshot table entry offset in the image
snapshot_table_entry_size()
{
    id_len=$(peek_file_be "$1" $(($2 + 12)) 2)
    name_len=$(peek_file_be "$1" $(($2 + 14)) 2)
    extra_len=$(peek_file_be "$1" $(($2 + 36)) 4)

    full_len=$((40 + extra_len + id_len + name_len))
    echo $(((full_len + 7) / 8 * 8))
}

# Parameter:
#   $1: image filename
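# (Byte offset 60 of the image header holds the number of snapshots,
# byte offset 64 the offset of the snapshot table.)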
print_snapshot_table()
{
    nb_entries=$(peek_file_be "$1" 60 4)
    offset=$(peek_file_be "$1" 64 8)

    echo "Snapshots in $1:" | _filter_testdir | _filter_imgfmt

    for ((i = 0; i < nb_entries; i++)); do
        id_len=$(peek_file_be "$1" $((offset + 12)) 2)
        name_len=$(peek_file_be "$1" $((offset + 14)) 2)
        extra_len=$(peek_file_be "$1" $((offset + 36)) 4)

        extra_ofs=$((offset + 40))
        id_ofs=$((extra_ofs + extra_len))
        name_ofs=$((id_ofs + id_len))

        echo "  [$i]"
        echo "    ID: $(peek_file_raw "$1" $id_ofs $id_len)"
        echo "    Name: $(peek_file_raw "$1" $name_ofs $name_len)"
        echo "    Extra data size: $extra_len"
        if [ $extra_len -ge 8 ]; then
            echo "    VM state size: $(peek_file_be "$1" $extra_ofs 8)"
        fi
        if [ $extra_len -ge 16 ]; then
            echo "    Disk size: $(peek_file_be "$1" $((extra_ofs + 8)) 8)"
        fi
        if [ $extra_len -ge 24 ]; then
            echo "    Icount: $(peek_file_be "$1" $((extra_ofs + 16)) 8)"
        fi
        if [ $extra_len -gt 24 ]; then
            echo '    Unknown extra data:' \
                "$(peek_file_raw "$1" $((extra_ofs + 24)) $((extra_len - 24)) \
                   | tr -d '\0')"
        fi

        offset=$((offset + $(snapshot_table_entry_size "$1" $offset)))
    done
}

# Mark clusters as allocated; works only in refblock 0 (i.e. before
# cluster #32768).
# Parameters:
#   $1: Start offset of what to allocate
#   $2: End offset (exclusive)
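# This assumes the default 64 kB cluster size and the 16-bit refcount
# entries enforced above: byte offset 48 of the image header is the
# refcount table offset, its first entry points to refblock 0, and every
# affected 2-byte refblock entry is simply set to a refcount of 1.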
refblock0_allocate()
{
    reftable_ofs=$(peek_file_be "$TEST_IMG" 48 8)
    refblock_ofs=$(peek_file_be "$TEST_IMG" $reftable_ofs 8)

    cluster=$(($1 / 65536))
    ecluster=$((($2 + 65535) / 65536))

    while [ $cluster -lt $ecluster ]; do
        if [ $cluster -ge 32768 ]; then
            echo "*** Abort: Cluster $cluster exceeds refblock 0 ***"
            exit 1
        fi
        poke_file "$TEST_IMG" $((refblock_ofs + cluster * 2)) '\x00\x01'
        cluster=$((cluster + 1))
    done
}


echo
echo '=== Create v2 template ==='
echo

# Create v2 image with a snapshot table with three entries:
# [0]: No extra data (valid with v2, not valid with v3)
# [1]: Has extra data unknown to qemu
# [2]: Has the 64-bit VM state size, but not the disk size (again,
#      valid with v2, not valid with v3)

TEST_IMG="$TEST_IMG.v2.orig" IMGOPTS='compat=0.10' _make_test_img 64M
$QEMU_IMG snapshot -c sn0 "$TEST_IMG.v2.orig"
$QEMU_IMG snapshot -c sn1 "$TEST_IMG.v2.orig"
$QEMU_IMG snapshot -c sn2 "$TEST_IMG.v2.orig"

# Copy out all existing snapshot table entries
sn_table_ofs=$(peek_file_be "$TEST_IMG.v2.orig" 64 8)

# ofs: Snapshot table entry offset
# eds: Extra data size
# ids: Name + ID size
# len: Total entry length
sn0_ofs=$sn_table_ofs
sn0_eds=$(peek_file_be "$TEST_IMG.v2.orig" $((sn0_ofs + 36)) 4)
sn0_ids=$(($(peek_file_be "$TEST_IMG.v2.orig" $((sn0_ofs + 12)) 2) +
           $(peek_file_be "$TEST_IMG.v2.orig" $((sn0_ofs + 14)) 2)))
sn0_len=$(snapshot_table_entry_size "$TEST_IMG.v2.orig" $sn0_ofs)
sn1_ofs=$((sn0_ofs + sn0_len))
sn1_eds=$(peek_file_be "$TEST_IMG.v2.orig" $((sn1_ofs + 36)) 4)
sn1_ids=$(($(peek_file_be "$TEST_IMG.v2.orig" $((sn1_ofs + 12)) 2) +
           $(peek_file_be "$TEST_IMG.v2.orig" $((sn1_ofs + 14)) 2)))
sn1_len=$(snapshot_table_entry_size "$TEST_IMG.v2.orig" $sn1_ofs)
sn2_ofs=$((sn1_ofs + sn1_len))
sn2_eds=$(peek_file_be "$TEST_IMG.v2.orig" $((sn2_ofs + 36)) 4)
sn2_ids=$(($(peek_file_be "$TEST_IMG.v2.orig" $((sn2_ofs + 12)) 2) +
           $(peek_file_be "$TEST_IMG.v2.orig" $((sn2_ofs + 14)) 2)))
sn2_len=$(snapshot_table_entry_size "$TEST_IMG.v2.orig" $sn2_ofs)

# Data before extra data
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn0-pre" bs=1 skip=$sn0_ofs count=40 \
    &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn1-pre" bs=1 skip=$sn1_ofs count=40 \
    &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn2-pre" bs=1 skip=$sn2_ofs count=40 \
    &> /dev/null

# Extra data
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn0-extra" bs=1 \
    skip=$((sn0_ofs + 40)) count=$sn0_eds &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn1-extra" bs=1 \
    skip=$((sn1_ofs + 40)) count=$sn1_eds &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn2-extra" bs=1 \
    skip=$((sn2_ofs + 40)) count=$sn2_eds &> /dev/null

# Data after extra data
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn0-post" bs=1 \
    skip=$((sn0_ofs + 40 + sn0_eds)) count=$sn0_ids \
    &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn1-post" bs=1 \
    skip=$((sn1_ofs + 40 + sn1_eds)) count=$sn1_ids \
    &> /dev/null
dd if="$TEST_IMG.v2.orig" of="$TEST_DIR/sn2-post" bs=1 \
    skip=$((sn2_ofs + 40 + sn2_eds)) count=$sn2_ids \
    &> /dev/null

# Amend them, one by one
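# (Each sn*-pre file contains just the 40-byte fixed part of an entry, so
# snapshot_table_entry_size can be called on it without an offset argument;
# with $2 empty, the arithmetic inside the function reads from the start of
# the file.)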
# Set sn0's extra data size to 0
poke_file "$TEST_DIR/sn0-pre" 36 '\x00\x00\x00\x00'
truncate -s 0 "$TEST_DIR/sn0-extra"
# Grow sn0-post to pad
truncate -s $(($(snapshot_table_entry_size "$TEST_DIR/sn0-pre") - 40)) \
    "$TEST_DIR/sn0-post"

# Set sn1's extra data size to 50
poke_file "$TEST_DIR/sn1-pre" 36 '\x00\x00\x00\x32'
truncate -s 50 "$TEST_DIR/sn1-extra"
poke_file "$TEST_DIR/sn1-extra" 24 'very important data'
# Grow sn1-post to pad
truncate -s $(($(snapshot_table_entry_size "$TEST_DIR/sn1-pre") - 90)) \
    "$TEST_DIR/sn1-post"

# Set sn2's extra data size to 8
poke_file "$TEST_DIR/sn2-pre" 36 '\x00\x00\x00\x08'
truncate -s 8 "$TEST_DIR/sn2-extra"
# Grow sn2-post to pad
truncate -s $(($(snapshot_table_entry_size "$TEST_DIR/sn2-pre") - 48)) \
    "$TEST_DIR/sn2-post"

# Construct snapshot table
cat "$TEST_DIR"/sn0-{pre,extra,post} \
    "$TEST_DIR"/sn1-{pre,extra,post} \
    "$TEST_DIR"/sn2-{pre,extra,post} \
    | dd of="$TEST_IMG.v2.orig" bs=1 seek=$sn_table_ofs conv=notrunc \
          &> /dev/null

# Done!
TEST_IMG="$TEST_IMG.v2.orig" _check_test_img
print_snapshot_table "$TEST_IMG.v2.orig"

echo
echo '=== Upgrade to v3 ==='
echo

cp "$TEST_IMG.v2.orig" "$TEST_IMG.v3.orig"
$QEMU_IMG amend -o compat=1.1 "$TEST_IMG.v3.orig"
TEST_IMG="$TEST_IMG.v3.orig" _check_test_img
print_snapshot_table "$TEST_IMG.v3.orig"

echo
echo '=== Repair botched v3 ==='
echo

# Force the v2 file to be v3.  v3 requires each snapshot table entry
# to have at least 16 bytes of extra data, so the result will not comply
# with the qcow2 v3 specification; but we can fix that.
247cp "$TEST_IMG.v2.orig" "$TEST_IMG"
248
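# Three header fields have to be adjusted for v3: the version at byte
# offset 4, the header length at offset 100 (104 bytes for the v3 header),
# and the refcount order at offset 96 (4, i.e. refcount_bits=16).
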
# Set version
poke_file "$TEST_IMG" 4 '\x00\x00\x00\x03'
# Increase header length (necessary for v3)
poke_file "$TEST_IMG" 100 '\x00\x00\x00\x68'
# Set refcount order (necessary for v3)
poke_file "$TEST_IMG" 96 '\x00\x00\x00\x04'

_check_test_img -r all
print_snapshot_table "$TEST_IMG"


# From now on, just test the qcow2 version we are supposed to test.
# (v3 by default, v2 by choice through $IMGOPTS.)
# That works because we always write all known extra data when
# updating the snapshot table, independently of the version.

if echo "$IMGOPTS" | grep -q 'compat=\(0\.10\|v2\)' 2> /dev/null; then
    subver=v2
else
    subver=v3
fi

echo
echo '=== Add new snapshot ==='
echo

cp "$TEST_IMG.$subver.orig" "$TEST_IMG"
$QEMU_IMG snapshot -c sn3 "$TEST_IMG"
_check_test_img
print_snapshot_table "$TEST_IMG"

echo
echo '=== Remove different snapshots ==='

for sn in sn0 sn1 sn2; do
    echo
    echo "--- $sn ---"

    cp "$TEST_IMG.$subver.orig" "$TEST_IMG"
    $QEMU_IMG snapshot -d $sn "$TEST_IMG"
    _check_test_img
    print_snapshot_table "$TEST_IMG"
done

echo
echo '=== Reject too much unknown extra data ==='
echo

cp "$TEST_IMG.$subver.orig" "$TEST_IMG"
$QEMU_IMG snapshot -c sn3 "$TEST_IMG"

sn_table_ofs=$(peek_file_be "$TEST_IMG" 64 8)
sn0_ofs=$sn_table_ofs
sn1_ofs=$((sn0_ofs + $(snapshot_table_entry_size "$TEST_IMG" $sn0_ofs)))
sn2_ofs=$((sn1_ofs + $(snapshot_table_entry_size "$TEST_IMG" $sn1_ofs)))
sn3_ofs=$((sn2_ofs + $(snapshot_table_entry_size "$TEST_IMG" $sn2_ofs)))

# 64 kB of extra data should be rejected
# (Note that this also induces a refcount error, because it spills
# over to the next cluster.  That's a good way to test that we can
# handle simultaneous snapshot table and refcount errors.)
poke_file "$TEST_IMG" $((sn3_ofs + 36)) '\x00\x01\x00\x00'

# Print error
_img_info
echo
_check_test_img
echo

# Should be repairable
_check_test_img -r all

echo
echo '=== Snapshot table too big ==='
echo

sn_table_ofs=$(peek_file_be "$TEST_IMG.v3.orig" 64 8)

# Fill a snapshot with 1 kB of extra data, a 65535-char ID, and a
# 65535-char name, and repeat it as many times as necessary to fill
# 64 MB (the maximum supported by qemu)

touch "$TEST_DIR/sn0"

# Full size (fixed + extra + ID + name + padding)
sn_size=$((40 + 1024 + 65535 + 65535 + 2))

# We only need the fixed part, though.
truncate -s 40 "$TEST_DIR/sn0"
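# (The ID string, name, and extra data themselves never have to be written:
# the entries are laid out sparsely in the image below, so those bytes all
# read back as zeroes, which is all this test needs.)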

# 65535-char ID string
poke_file "$TEST_DIR/sn0" 12 '\xff\xff'
# 65535-char name
poke_file "$TEST_DIR/sn0" 14 '\xff\xff'
# 1 kB of extra data
poke_file "$TEST_DIR/sn0" 36 '\x00\x00\x04\x00'

# Create test image
_make_test_img 64M

# Hook up snapshot table somewhere safe (at 1 MB)
poke_file "$TEST_IMG" 64 '\x00\x00\x00\x00\x00\x10\x00\x00'

offset=1048576
size_written=0
sn_count=0
while [ $size_written -le $((64 * 1048576)) ]; do
    dd if="$TEST_DIR/sn0" of="$TEST_IMG" bs=1 seek=$offset conv=notrunc \
        &> /dev/null
    offset=$((offset + sn_size))
    size_written=$((size_written + sn_size))
    sn_count=$((sn_count + 1))
done
truncate -s "$offset" "$TEST_IMG"

# Give the last snapshot (the one to be removed) an L1 table so we can
# see how that is handled when repairing the image
# (Put it two clusters before 1 MB, and one L2 table one cluster
# before 1 MB)
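# (Byte 0 of a snapshot table entry is the 8-byte L1 table offset, byte 8
# the 4-byte L1 table size; 0x0e0000 is 1 MB - 128 kB.)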
poke_file "$TEST_IMG" $((offset - sn_size + 0)) \
    '\x00\x00\x00\x00\x00\x0e\x00\x00'
poke_file "$TEST_IMG" $((offset - sn_size + 8)) \
    '\x00\x00\x00\x01'

# Hook up the L2 table
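# (i.e. write a single L1 entry: COPIED flag in the topmost bit, pointing
# to the L2 table cluster at 1 MB - 64 kB)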
poke_file "$TEST_IMG" $((1048576 - 2 * 65536)) \
    '\x80\x00\x00\x00\x00\x0f\x00\x00'

# Make sure all of the clusters we just hooked up are allocated:
# - The snapshot table
# - The last snapshot's L1 and L2 table
refblock0_allocate $((1048576 - 2 * 65536)) $offset

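# Write the snapshot count into the header (byte offset 60); printf and sed
# turn $sn_count into the \xNN escapes that poke_file expects.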
poke_file "$TEST_IMG" 60 \
    "$(printf '%08x' $sn_count | sed -e 's/\(..\)/\\x\1/g')"

# Print error
_img_info
echo
_check_test_img
echo

# Should be repairable
_check_test_img -r all

echo
echo "$((sn_count - 1)) snapshots should remain:"
echo "  qemu-img info reports $(_img_info | grep -c '^ \{30\}') snapshots"
echo "  Image header reports $(peek_file_be "$TEST_IMG" 60 4) snapshots"

echo
echo '=== Snapshot table too big with one entry with too much extra data ==='
echo

# For this test, we reuse the image from the previous case, which has
# a snapshot table that is right at the limit.
# Our layout looks like this:
# - (a number of snapshot table entries)
# - One snapshot with $extra_data_size extra data
# - One normal snapshot that breaks the 64 MB boundary
# - One normal snapshot beyond the 64 MB boundary
#
# $extra_data_size is calculated so that simply by virtue of it
# decreasing to 1 kB, the penultimate snapshot will fit into the 64 MB
# limit again.  The final snapshot will always be beyond the limit, so
# that we can see that the repair algorithm still determines where the
# limit is, even when it has to truncate one snapshot's extra data.

# The previous test case removed the last snapshot, so calculate
# $old_offset to get the current image's real length
old_offset=$((offset - sn_size))

# The layout from the previous test had one snapshot beyond the 64 MB
# limit; we want the same (after the oversized extra data has been
# truncated to 1 kB), so we drop the last three snapshots and
# construct them from scratch.
offset=$((offset - 3 * sn_size))
sn_count=$((sn_count - 3))

# Pretend we have already written one of the three snapshots
# (necessary so we can calculate $extra_data_size next).
size_written=$((size_written - 2 * sn_size))

# Increase the extra data size so we go past the limit
# (The -1024 comes from the 1 kB of extra data we already have)
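# Choose $extra_data_size so that, once this entry's extra data has grown
# from 1 kB to $extra_data_size and one more normal snapshot has been
# appended, the table ends exactly 8 bytes past the 64 MB mark (checked by
# the echo in the loop below); the snapshot after that then lies entirely
# beyond the limit.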
extra_data_size=$((64 * 1048576 + 8 - sn_size - (size_written - 1024)))

poke_file "$TEST_IMG" $((offset + 36)) \
    "$(printf '%08x' $extra_data_size | sed -e 's/\(..\)/\\x\1/g')"

offset=$((offset + sn_size - 1024 + extra_data_size))
size_written=$((size_written - 1024 + extra_data_size))
sn_count=$((sn_count + 1))

# Write the two normal snapshots
for ((i = 0; i < 2; i++)); do
    dd if="$TEST_DIR/sn0" of="$TEST_IMG" bs=1 seek=$offset conv=notrunc \
        &> /dev/null
    offset=$((offset + sn_size))
    size_written=$((size_written + sn_size))
    sn_count=$((sn_count + 1))

    if [ $i = 0 ]; then
        # Check that the penultimate snapshot is beyond the 64 MB limit
        echo "Snapshot table size should equal $((64 * 1048576 + 8)):" \
            $size_written
        echo
    fi
done

truncate -s $offset "$TEST_IMG"
refblock0_allocate $old_offset $offset

poke_file "$TEST_IMG" 60 \
    "$(printf '%08x' $sn_count | sed -e 's/\(..\)/\\x\1/g')"

# Print error
_img_info
echo
_check_test_img
echo

# Just truncating the extra data should be sufficient to shorten the
# snapshot table so that only one snapshot still exceeds the 64 MB limit
_check_test_img -r all

echo
echo '=== Too many snapshots ==='
echo

# Create a v2 image, for speed's sake: All-zero snapshot table entries
# are only valid in v2.
IMGOPTS='compat=0.10' _make_test_img 64M

# Hook up snapshot table somewhere safe (at 1 MB)
poke_file "$TEST_IMG" 64 '\x00\x00\x00\x00\x00\x10\x00\x00'
# "Create" more than 65536 snapshots (twice that many here)
poke_file "$TEST_IMG" 60 '\x00\x02\x00\x00'

# 40-byte all-zero snapshot table entries are valid snapshots, but
# only in v2 (v3 needs 16 bytes of extra data, so we would have to
# write 131072x '\x10').
truncate -s $((1048576 + 40 * 131072)) "$TEST_IMG"
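# (truncate extends the image with zeroes, so all 131072 entries read back
# as all-zero without us having to write them)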

# But let us give one of the snapshots to be removed an L1 table so
# we can see how that is handled when repairing the image.
# (Put it two clusters before 1 MB, and one L2 table one cluster
# before 1 MB)
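# (Entry #65536, the first one past the 65536-snapshot limit, starts at
# 1 MB + 40 * 65536.)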
poke_file "$TEST_IMG" $((1048576 + 40 * 65536 + 0)) \
    '\x00\x00\x00\x00\x00\x0e\x00\x00'
poke_file "$TEST_IMG" $((1048576 + 40 * 65536 + 8)) \
    '\x00\x00\x00\x01'

# Hook up the L2 table
poke_file "$TEST_IMG" $((1048576 - 2 * 65536)) \
    '\x80\x00\x00\x00\x00\x0f\x00\x00'

# Make sure all of the clusters we just hooked up are allocated:
# - The snapshot table
# - The last snapshot's L1 and L2 table
refblock0_allocate $((1048576 - 2 * 65536)) $((1048576 + 40 * 131072))

# Print error
_img_info
echo
_check_test_img
echo

# Should be repairable
_check_test_img -r all

echo
echo '65536 snapshots should remain:'
echo "  qemu-img info reports $(_img_info | grep -c '^ \{30\}') snapshots"
echo "  Image header reports $(peek_file_be "$TEST_IMG" 60 4) snapshots"

# success, all done
echo "*** done"
status=0
