/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
/*
 * The maximum number of segments allowed in an indirect request. This
 * value is also exported to the frontend (via the xenstore
 * "feature-max-indirect-segments" entry).
 */
#define MAX_INDIRECT_SEGMENTS 256

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
	(((_segs) + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
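
/*
 * Worked example (a sketch, assuming 4 KiB pages and an 8-byte
 * struct blkif_request_segment; both are the common case but are not
 * guaranteed by this header):
 *
 *	SEGS_PER_INDIRECT_FRAME = 4096 / 8 = 512
 *	MAX_INDIRECT_PAGES      = (256 + 511) / 512 = 1
 *	INDIRECT_PAGES(1024)    = (1024 + 511) / 512 = 2
 *
 * i.e. a single indirect page is enough to describe a request at the
 * default MAX_INDIRECT_SEGMENTS.
 */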

/*
 * Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.
 */
struct blkif_common_request {
	char dummy;
};
struct blkif_common_response {
	char dummy;
};

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;   /* number of contiguous sectors         */
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * "feature-max-indirect-segments" entry), so the frontend knows
	 * how many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;   /* number of contiguous sectors         */
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * "feature-max-indirect-segments" entry), so the frontend knows
	 * how many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
	uint64_t       __attribute__((__aligned__(8))) id;
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
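
/*
 * The _pad fields above keep the 64-bit guest's "id" at byte offset 8
 * for every operation. A sketch of how that invariant could be checked
 * at compile time (from within any function body; not built here):
 *
 *	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.rw.id) != 8);
 *	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.discard.id) != 8);
 *	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.indirect.id) != 8);
 */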

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_x86_64_response);

union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
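
/*
 * Usage sketch for the union and enum above: the backend initializes
 * the ring variant matching the negotiated protocol and uses only that
 * member afterwards. BACK_RING_INIT() comes from
 * <xen/interface/io/ring.h>; "sring", "sring32", "sring64" and
 * "ring_size" are illustrative names, not declared in this header:
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		BACK_RING_INIT(&blkif->blk_rings.native, sring, ring_size);
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring32, ring_size);
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring64, ring_size);
 *		break;
 *	}
 */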

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE		0
/*
 * This persistent grant has been used recently. The flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, so the LRU purge logic knows the
 * grant was in use not long ago.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1
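
/*
 * Lifecycle sketch for these flags (typical usage, not an API defined
 * here; "gnt" is an illustrative struct persistent_gnt pointer, and
 * the bitmap is manipulated with the standard set_bit()/clear_bit()
 * helpers):
 *
 *	set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);       mapped for a request
 *	set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);   remember recent use...
 *	clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);     ...and release
 *
 * so the purge worker can spare grants that were used recently.
 */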

/* Number of requests that we can fit in a ring page */
#define XEN_BLKIF_REQS_PER_PAGE		32

struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node node;
	struct list_head remove_node;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Private fields. */
	spinlock_t		blk_ring_lock;
	atomic_t		refcnt;

	wait_queue_head_t	wq;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;
	atomic_t		inflight;
	/* One thread per blkif. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* Tree to store persistent grants. */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* Used by the kworker that offloads the persistent-grant purge work. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* buffer of free pages to map grant refs */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* statistics */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	unsigned int		nr_ring_pages;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page		*unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};

#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			  get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
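
/*
 * Reference-counting sketch (typical usage, not an API defined here;
 * "blkif" is an illustrative struct xen_blkif pointer): take a
 * reference while a request is outstanding and drop it on completion.
 * The final put does not free in place; it schedules ->free_work so
 * teardown happens in process context:
 *
 *	xen_blkif_get(blkif);
 *	... issue the I/O ...
 *	xen_blkif_put(blkif);	(the last put schedules blkif->free_work)
 */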

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif *blkif);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

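/*
 * Copy an x86_32-ABI guest request from the shared ring into the
 * backend's native request layout. Only the fields meaningful for the
 * given operation are translated; the barrier() ensures the segment
 * count that bounds the copy loop is the private copy in *dst and is
 * not re-read from the guest-writable ring.
 */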
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

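/*
 * As blkif_get_x86_32_req() above, but for the x86_64 guest ABI.
 */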
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */