1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License version 2
4  * as published by the Free Software Foundation; or, when distributed
5  * separately from the Linux kernel or incorporated into other
6  * software packages, subject to the following license:
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this source file (the "Software"), to deal in the Software without
10  * restriction, including without limitation the rights to use, copy, modify,
11  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12  * and to permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24  * IN THE SOFTWARE.
25  */
26 
27 #ifndef __XEN_BLKIF__BACKEND__COMMON_H__
28 #define __XEN_BLKIF__BACKEND__COMMON_H__
29 
30 #include <linux/module.h>
31 #include <linux/interrupt.h>
32 #include <linux/slab.h>
33 #include <linux/blkdev.h>
34 #include <linux/vmalloc.h>
35 #include <linux/wait.h>
36 #include <linux/io.h>
37 #include <linux/rbtree.h>
38 #include <asm/setup.h>
39 #include <asm/pgalloc.h>
40 #include <asm/hypervisor.h>
41 #include <xen/grant_table.h>
42 #include <xen/xenbus.h>
43 #include <xen/interface/io/ring.h>
44 #include <xen/interface/io/blkif.h>
45 #include <xen/interface/io/protocols.h>
46 
47 #define DRV_PFX "xen-blkback:"
48 #define DPRINTK(fmt, args...)				\
49 	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
50 		 __func__, __LINE__, ##args)
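
/*
 * Example (illustrative): DPRINTK("%d grants in use", cnt) prints
 * "xen-blkback:(<func>:<line>) <cnt> grants in use." at debug level;
 * the "xen-blkback:" prefix, trailing '.' and '\n' all come from the
 * macro.
 */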
51 
53 /*
54  * This is the maximum number of segments that would be allowed in indirect
55  * requests. This value will also be passed to the frontend.
56  */
57 #define MAX_INDIRECT_SEGMENTS 256
58 
59 #define SEGS_PER_INDIRECT_FRAME \
60 	(PAGE_SIZE/sizeof(struct blkif_request_segment))
61 #define MAX_INDIRECT_PAGES \
62 	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
	(((_segs) + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
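
/*
 * Worked example (assuming 4 KiB pages and the usual 8-byte
 * struct blkif_request_segment): SEGS_PER_INDIRECT_FRAME is
 * 4096 / 8 = 512, so the 256 segments allowed by MAX_INDIRECT_SEGMENTS
 * fit in a single indirect page:
 *
 *	MAX_INDIRECT_PAGES  = (256 + 512 - 1) / 512 = 1
 *	INDIRECT_PAGES(100) = (100 + 512 - 1) / 512 = 1
 */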
65 
/*
 * Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.
 */
70 struct blkif_common_request {
71 	char dummy;
72 };
73 struct blkif_common_response {
74 	char dummy;
75 };
76 
77 struct blkif_x86_32_request_rw {
78 	uint8_t        nr_segments;  /* number of segments                   */
79 	blkif_vdev_t   handle;       /* only for read/write requests         */
80 	uint64_t       id;           /* private guest value, echoed in resp  */
81 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
82 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
83 } __attribute__((__packed__));
84 
85 struct blkif_x86_32_request_discard {
86 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
87 	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
88 	uint64_t       id;           /* private guest value, echoed in resp  */
89 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
90 	uint64_t       nr_sectors;
91 } __attribute__((__packed__));
92 
93 struct blkif_x86_32_request_other {
94 	uint8_t        _pad1;
95 	blkif_vdev_t   _pad2;
96 	uint64_t       id;           /* private guest value, echoed in resp  */
97 } __attribute__((__packed__));
98 
99 struct blkif_x86_32_request_indirect {
100 	uint8_t        indirect_op;
101 	uint16_t       nr_segments;
102 	uint64_t       id;
103 	blkif_sector_t sector_number;
104 	blkif_vdev_t   handle;
105 	uint16_t       _pad1;
106 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
107 	/*
108 	 * The maximum number of indirect segments (and pages) that will
109 	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
110 	 * is also exported to the guest (via xenstore
111 	 * feature-max-indirect-segments entry), so the frontend knows how
112 	 * many indirect segments the backend supports.
113 	 */
114 	uint64_t       _pad2;        /* make it 64 byte aligned */
115 } __attribute__((__packed__));
116 
117 struct blkif_x86_32_request {
118 	uint8_t        operation;    /* BLKIF_OP_???                         */
119 	union {
120 		struct blkif_x86_32_request_rw rw;
121 		struct blkif_x86_32_request_discard discard;
122 		struct blkif_x86_32_request_other other;
123 		struct blkif_x86_32_request_indirect indirect;
124 	} u;
125 } __attribute__((__packed__));
126 
127 /* i386 protocol version */
128 #pragma pack(push, 4)
129 struct blkif_x86_32_response {
130 	uint64_t        id;              /* copied from request */
131 	uint8_t         operation;       /* copied from request */
132 	int16_t         status;          /* BLKIF_RSP_???       */
133 };
134 #pragma pack(pop)

/* x86_64 protocol version */
137 struct blkif_x86_64_request_rw {
138 	uint8_t        nr_segments;  /* number of segments                   */
139 	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
141 	uint64_t       id;
142 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
143 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
144 } __attribute__((__packed__));
145 
146 struct blkif_x86_64_request_discard {
147 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
148 	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
150 	uint64_t       id;
151 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
152 	uint64_t       nr_sectors;
153 } __attribute__((__packed__));
154 
155 struct blkif_x86_64_request_other {
156 	uint8_t        _pad1;
157 	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
159 	uint64_t       id;           /* private guest value, echoed in resp  */
160 } __attribute__((__packed__));
161 
162 struct blkif_x86_64_request_indirect {
163 	uint8_t        indirect_op;
164 	uint16_t       nr_segments;
165 	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8   */
166 	uint64_t       id;
167 	blkif_sector_t sector_number;
168 	blkif_vdev_t   handle;
169 	uint16_t       _pad2;
170 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
171 	/*
172 	 * The maximum number of indirect segments (and pages) that will
173 	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
174 	 * is also exported to the guest (via xenstore
175 	 * feature-max-indirect-segments entry), so the frontend knows how
176 	 * many indirect segments the backend supports.
177 	 */
178 	uint32_t       _pad3;        /* make it 64 byte aligned */
179 } __attribute__((__packed__));
180 
181 struct blkif_x86_64_request {
182 	uint8_t        operation;    /* BLKIF_OP_???                         */
183 	union {
184 		struct blkif_x86_64_request_rw rw;
185 		struct blkif_x86_64_request_discard discard;
186 		struct blkif_x86_64_request_other other;
187 		struct blkif_x86_64_request_indirect indirect;
188 	} u;
189 } __attribute__((__packed__));
190 
191 struct blkif_x86_64_response {
192 	uint64_t       __attribute__((__aligned__(8))) id;
193 	uint8_t         operation;       /* copied from request */
194 	int16_t         status;          /* BLKIF_RSP_???       */
195 };
196 
197 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
198 		  struct blkif_common_response);
199 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
200 		  struct blkif_x86_32_response);
201 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
202 		  struct blkif_x86_64_response);
203 
204 union blkif_back_rings {
205 	struct blkif_back_ring        native;
206 	struct blkif_common_back_ring common;
207 	struct blkif_x86_32_back_ring x86_32;
208 	struct blkif_x86_64_back_ring x86_64;
209 };
210 
211 enum blkif_protocol {
212 	BLKIF_PROTOCOL_NATIVE = 1,
213 	BLKIF_PROTOCOL_X86_32 = 2,
214 	BLKIF_PROTOCOL_X86_64 = 3,
215 };
216 
217 struct xen_vbd {
218 	/* What the domain refers to this vbd as. */
219 	blkif_vdev_t		handle;
220 	/* Non-zero -> read-only */
221 	unsigned char		readonly;
222 	/* VDISK_xxx */
223 	unsigned char		type;
224 	/* phys device that this vbd maps to. */
225 	u32			pdevice;
226 	struct block_device	*bdev;
227 	/* Cached size parameter. */
228 	sector_t		size;
229 	unsigned int		flush_support:1;
230 	unsigned int		discard_secure:1;
231 	unsigned int		feature_gnt_persistent:1;
232 	unsigned int		overflow_max_grants:1;
233 };
234 
235 struct backend_info;
236 
237 /* Number of available flags */
238 #define PERSISTENT_GNT_FLAGS_SIZE	2
239 /* This persistent grant is currently in use */
240 #define PERSISTENT_GNT_ACTIVE		0
241 /*
242  * This persistent grant has been used, this flag is set when we remove the
243  * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
244  */
245 #define PERSISTENT_GNT_WAS_ACTIVE	1
246 
247 /* Number of requests that we can fit in a ring */
248 #define XEN_BLKIF_REQS			32
249 
250 struct persistent_gnt {
251 	struct page *page;
252 	grant_ref_t gnt;
253 	grant_handle_t handle;
254 	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
255 	struct rb_node node;
256 	struct list_head remove_node;
257 };
258 
259 struct xen_blkif {
260 	/* Unique identifier for this interface. */
261 	domid_t			domid;
262 	unsigned int		handle;
263 	/* Physical parameters of the comms window. */
264 	unsigned int		irq;
265 	/* Comms information. */
266 	enum blkif_protocol	blk_protocol;
267 	union blkif_back_rings	blk_rings;
268 	void			*blk_ring;
269 	/* The VBD attached to this interface. */
270 	struct xen_vbd		vbd;
271 	/* Back pointer to the backend_info. */
272 	struct backend_info	*be;
273 	/* Private fields. */
274 	spinlock_t		blk_ring_lock;
275 	atomic_t		refcnt;
276 
277 	wait_queue_head_t	wq;
278 	/* for barrier (drain) requests */
279 	struct completion	drain_complete;
280 	atomic_t		drain;
281 	atomic_t		inflight;
	/* One thread per blkif. */
283 	struct task_struct	*xenblkd;
284 	unsigned int		waiting_reqs;
285 
286 	/* tree to store persistent grants */
287 	struct rb_root		persistent_gnts;
288 	unsigned int		persistent_gnt_c;
289 	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;
291 
	/* used by the kworker that offloads work from the persistent purge */
293 	struct list_head	persistent_purge_list;
294 	struct work_struct	persistent_purge_work;
295 
296 	/* buffer of free pages to map grant refs */
297 	spinlock_t		free_pages_lock;
298 	int			free_pages_num;
299 	struct list_head	free_pages;
300 
301 	/* List of all 'pending_req' available */
302 	struct list_head	pending_free;
303 	/* And its spinlock. */
304 	spinlock_t		pending_free_lock;
305 	wait_queue_head_t	pending_free_wq;
306 
307 	/* statistics */
308 	unsigned long		st_print;
309 	unsigned long long			st_rd_req;
310 	unsigned long long			st_wr_req;
311 	unsigned long long			st_oo_req;
312 	unsigned long long			st_f_req;
313 	unsigned long long			st_ds_req;
314 	unsigned long long			st_rd_sect;
315 	unsigned long long			st_wr_sect;
316 
317 	struct work_struct	free_work;
318 	/* Thread shutdown wait queue. */
319 	wait_queue_head_t	shutdown_wq;
320 };
321 
322 struct seg_buf {
323 	unsigned long offset;
324 	unsigned int nsec;
325 };
326 
327 struct grant_page {
328 	struct page 		*page;
329 	struct persistent_gnt	*persistent_gnt;
330 	grant_handle_t		handle;
331 	grant_ref_t		gref;
332 };
333 
334 /*
335  * Each outstanding request that we've passed to the lower device layers has a
336  * 'pending_req' allocated to it. Each buffer_head that completes decrements
337  * the pendcnt towards zero. When it hits zero, the specified domain has a
338  * response queued for it, with the saved 'id' passed back.
339  */
340 struct pending_req {
341 	struct xen_blkif	*blkif;
342 	u64			id;
343 	int			nr_pages;
344 	atomic_t		pendcnt;
345 	unsigned short		operation;
346 	int			status;
347 	struct list_head	free_list;
348 	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
349 	/* Indirect descriptors */
350 	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
351 	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
352 	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
353 };
354 
355 
356 #define vbd_sz(_v)	((_v)->bdev->bd_part ? \
357 			 (_v)->bdev->bd_part->nr_sects : \
358 			  get_capacity((_v)->bdev->bd_disk))
359 
360 #define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
361 #define xen_blkif_put(_b)				\
362 	do {						\
363 		if (atomic_dec_and_test(&(_b)->refcnt))	\
364 			schedule_work(&(_b)->free_work);\
365 	} while (0)
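
/*
 * Typical pairing (illustrative): a reference is taken for each unit of
 * in-flight work and dropped on completion. The final xen_blkif_put()
 * only schedules free_work instead of freeing directly, so the last put
 * is safe from atomic (e.g. interrupt) context:
 *
 *	xen_blkif_get(blkif);
 *	... submit I/O ...
 *	xen_blkif_put(blkif);
 */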
366 
367 struct phys_req {
368 	unsigned short		dev;
369 	blkif_sector_t		nr_sects;
370 	struct block_device	*bdev;
371 	blkif_sector_t		sector_number;
372 };
373 int xen_blkif_interface_init(void);
374 
375 int xen_blkif_xenbus_init(void);
376 
377 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
378 int xen_blkif_schedule(void *arg);
379 int xen_blkif_purge_persistent(void *arg);
380 void xen_blkbk_free_caches(struct xen_blkif *blkif);
381 
382 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
383 			      struct backend_info *be, int state);
384 
385 int xen_blkbk_barrier(struct xenbus_transaction xbt,
386 		      struct backend_info *be, int state);
387 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
388 void xen_blkbk_unmap_purged_grants(struct work_struct *work);
389 
390 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
391 					struct blkif_x86_32_request *src)
392 {
393 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
394 	dst->operation = src->operation;
395 	switch (src->operation) {
396 	case BLKIF_OP_READ:
397 	case BLKIF_OP_WRITE:
398 	case BLKIF_OP_WRITE_BARRIER:
399 	case BLKIF_OP_FLUSH_DISKCACHE:
400 		dst->u.rw.nr_segments = src->u.rw.nr_segments;
401 		dst->u.rw.handle = src->u.rw.handle;
402 		dst->u.rw.id = src->u.rw.id;
403 		dst->u.rw.sector_number = src->u.rw.sector_number;
404 		barrier();
405 		if (n > dst->u.rw.nr_segments)
406 			n = dst->u.rw.nr_segments;
407 		for (i = 0; i < n; i++)
408 			dst->u.rw.seg[i] = src->u.rw.seg[i];
409 		break;
410 	case BLKIF_OP_DISCARD:
411 		dst->u.discard.flag = src->u.discard.flag;
412 		dst->u.discard.id = src->u.discard.id;
413 		dst->u.discard.sector_number = src->u.discard.sector_number;
414 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
415 		break;
416 	case BLKIF_OP_INDIRECT:
417 		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
418 		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
419 		dst->u.indirect.handle = src->u.indirect.handle;
420 		dst->u.indirect.id = src->u.indirect.id;
421 		dst->u.indirect.sector_number = src->u.indirect.sector_number;
422 		barrier();
423 		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
424 		for (i = 0; i < j; i++)
425 			dst->u.indirect.indirect_grefs[i] =
426 				src->u.indirect.indirect_grefs[i];
427 		break;
428 	default:
429 		/*
430 		 * Don't know how to translate this op. Only get the
431 		 * ID so failure can be reported to the frontend.
432 		 */
433 		dst->u.other.id = src->u.other.id;
434 		break;
435 	}
436 }
437 
438 static inline void blkif_get_x86_64_req(struct blkif_request *dst,
439 					struct blkif_x86_64_request *src)
440 {
441 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
442 	dst->operation = src->operation;
443 	switch (src->operation) {
444 	case BLKIF_OP_READ:
445 	case BLKIF_OP_WRITE:
446 	case BLKIF_OP_WRITE_BARRIER:
447 	case BLKIF_OP_FLUSH_DISKCACHE:
448 		dst->u.rw.nr_segments = src->u.rw.nr_segments;
449 		dst->u.rw.handle = src->u.rw.handle;
450 		dst->u.rw.id = src->u.rw.id;
451 		dst->u.rw.sector_number = src->u.rw.sector_number;
452 		barrier();
453 		if (n > dst->u.rw.nr_segments)
454 			n = dst->u.rw.nr_segments;
455 		for (i = 0; i < n; i++)
456 			dst->u.rw.seg[i] = src->u.rw.seg[i];
457 		break;
458 	case BLKIF_OP_DISCARD:
459 		dst->u.discard.flag = src->u.discard.flag;
460 		dst->u.discard.id = src->u.discard.id;
461 		dst->u.discard.sector_number = src->u.discard.sector_number;
462 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
463 		break;
464 	case BLKIF_OP_INDIRECT:
465 		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
466 		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
467 		dst->u.indirect.handle = src->u.indirect.handle;
468 		dst->u.indirect.id = src->u.indirect.id;
469 		dst->u.indirect.sector_number = src->u.indirect.sector_number;
470 		barrier();
471 		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
472 		for (i = 0; i < j; i++)
473 			dst->u.indirect.indirect_grefs[i] =
474 				src->u.indirect.indirect_grefs[i];
475 		break;
476 	default:
477 		/*
478 		 * Don't know how to translate this op. Only get the
479 		 * ID so failure can be reported to the frontend.
480 		 */
481 		dst->u.other.id = src->u.other.id;
482 		break;
483 	}
484 }
485 
486 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
487