xref: /openbmc/qemu/block/qed-cluster.c (revision 1cfe48c1)
/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qed.h"

/**
 * Count the number of contiguous data clusters
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Maximum number of clusters
 * @offset:         Set to first cluster offset
 *
 * This function scans the table for a contiguous run of clusters starting at
 * @index.  A run consists entirely of allocated, unallocated, or zero
 * clusters; a run of allocated clusters must also be contiguous in the image
 * file.  The number of clusters in the run, counting the cluster at @index,
 * is returned.
 */
static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
                                                  QEDTable *table,
                                                  unsigned int index,
                                                  unsigned int n,
                                                  uint64_t *offset)
{
    unsigned int end = MIN(index + n, s->table_nelems);
    uint64_t last = table->offsets[index];
    unsigned int i;

    *offset = last;

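    /*
     * Walk forward from the second cluster in the range.  The run ends at the
     * first entry whose allocation state differs from the first cluster, or,
     * for allocated clusters, at the first entry that is not physically
     * adjacent to the previous one in the image file.
     */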
    for (i = index + 1; i < end; i++) {
        if (qed_offset_is_unalloc_cluster(last)) {
            /* Counting unallocated clusters */
            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
                break;
            }
        } else if (qed_offset_is_zero_cluster(last)) {
            /* Counting zero clusters */
            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
                break;
            }
        } else {
            /* Counting allocated clusters */
            if (table->offsets[i] != last + s->header.cluster_size) {
                break;
            }
            last = table->offsets[i];
        }
    }
    return i - index;
}

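/*
 * Illustrative example (not from the original source, sizes assumed): with a
 * 64 KB cluster size and an L2 table fragment
 *
 *   offsets[] = { 0x10000, 0x20000, 0x30000, 0x0 }
 *
 * a scan with index=0 and n=4 returns 3: offsets 0x20000 and 0x30000 each
 * follow the previous offset by exactly cluster_size, while offsets[3] is not
 * 0x40000 and therefore ends the run.  *offset is set to 0x10000.
 */
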
/**
 * Find the offset of a data cluster
 *
 * @s:          QED state
 * @request:    L2 cache entry
 * @pos:        Byte position in device
 * @len:        Number of bytes (may be shortened on return)
 * @img_offset: Contains offset in the image file on success
 *
 * This function translates a position in the block device to an offset in the
 * image file.  The translated offset or unallocated range in the image file
 * is reported back in *img_offset and *len.
 *
 * If the L2 table exists, request->l2_table points to the L2 table cache entry
 * and the caller must free the reference when they are finished.  The cache
 * entry is exposed in this way to avoid callers having to read the L2 table
 * again later during request processing.  If request->l2_table is non-NULL it
 * will be unreferenced before taking on the new cache entry.
 *
 * On success QED_CLUSTER_FOUND is returned and img_offset/len describe a
 * contiguous range in the image file, or QED_CLUSTER_ZERO is returned for a
 * run of clusters that read back as zeroes.
 *
 * If the position is unallocated, QED_CLUSTER_L2 or QED_CLUSTER_L1 is returned
 * for a missing L2 table or L1 table offset, respectively, and len is the
 * number of contiguous unallocated bytes.
 *
 * A negative errno is returned if a table or cluster offset is invalid.
 */
int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
                                  uint64_t pos, size_t *len,
                                  uint64_t *img_offset)
{
    uint64_t l2_offset;
    uint64_t offset = 0;
    unsigned int index;
    unsigned int n;
    int ret;

    /* Limit length to L2 boundary.  Requests are broken up at the L2 boundary
     * so that a request acts on one L2 table at a time.  The expression below
     * rounds pos up to the next L2 table boundary (a multiple of 2^l1_shift)
     * and takes the distance from pos.
     */
    *len = MIN(*len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);

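    /* Look up the file offset of the L2 table covering pos via its L1 table
     * entry; an unallocated L1 entry means no L2 table exists yet.
     */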
    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    if (qed_offset_is_unalloc_cluster(l2_offset)) {
        *img_offset = 0;
        return QED_CLUSTER_L1;
    }
    if (!qed_check_table_offset(s, l2_offset)) {
        *img_offset = *len = 0;
        return -EINVAL;
    }

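    /* Read the L2 table and take a reference to its cache entry in
     * request->l2_table.
     */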
    ret = qed_read_l2_table(s, request, l2_offset);
    qed_acquire(s);
    if (ret) {
        goto out;
    }

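    /* Convert the byte range into a starting index and cluster count within
     * this L2 table, then measure the contiguous run beginning at that index.
     */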
    index = qed_l2_index(s, pos);
    n = qed_bytes_to_clusters(s, qed_offset_into_cluster(s, pos) + *len);
    n = qed_count_contiguous_clusters(s, request->l2_table->table,
                                      index, n, &offset);

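    /* Classify the run by the state of its first cluster */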
    if (qed_offset_is_unalloc_cluster(offset)) {
        ret = QED_CLUSTER_L2;
    } else if (qed_offset_is_zero_cluster(offset)) {
        ret = QED_CLUSTER_ZERO;
    } else if (qed_check_cluster_offset(s, offset)) {
        ret = QED_CLUSTER_FOUND;
    } else {
        ret = -EINVAL;
    }

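    /* Shorten the byte count so it does not extend past the contiguous run */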
    *len = MIN(*len,
               n * s->header.cluster_size - qed_offset_into_cluster(s, pos));

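    /* If reading the L2 table failed, offset is still 0 and no mapping is
     * reported.
     */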
out:
    *img_offset = offset;
    qed_release(s);
    return ret;
}
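
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller might consume qed_find_cluster().  The helper name
 * example_map_range() is made up, and qed_unref_l2_cache_entry() is assumed
 * to be the release function referred to by the doc comment above; this is a
 * sketch of the calling convention, not code from the QED driver.
 */
#if 0
static int coroutine_fn example_map_range(BDRVQEDState *s, uint64_t pos,
                                          size_t len)
{
    QEDRequest request = { .l2_table = NULL };
    uint64_t img_offset;
    int ret;

    ret = qed_find_cluster(s, &request, pos, &len, &img_offset);
    switch (ret) {
    case QED_CLUSTER_FOUND:
        /* len bytes are stored contiguously at img_offset in the image file */
        break;
    case QED_CLUSTER_ZERO:
        /* len bytes read back as zeroes */
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        /* len bytes are unallocated; read as zeroes or allocate on write */
        break;
    default:
        /* negative errno, e.g. -EINVAL for a corrupt table offset */
        break;
    }

    /* Drop the L2 cache entry reference the lookup may have taken */
    if (request.l2_table) {
        qed_unref_l2_cache_entry(request.l2_table);
    }
    return ret < 0 ? ret : 0;
}
#endif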