xref: /openbmc/linux/drivers/block/xen-blkback/common.h (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1dfc07b13SKonrad Rzeszutek Wilk /*
2dfc07b13SKonrad Rzeszutek Wilk  * This program is free software; you can redistribute it and/or
3dfc07b13SKonrad Rzeszutek Wilk  * modify it under the terms of the GNU General Public License version 2
4dfc07b13SKonrad Rzeszutek Wilk  * as published by the Free Software Foundation; or, when distributed
5dfc07b13SKonrad Rzeszutek Wilk  * separately from the Linux kernel or incorporated into other
6dfc07b13SKonrad Rzeszutek Wilk  * software packages, subject to the following license:
7dfc07b13SKonrad Rzeszutek Wilk  *
8dfc07b13SKonrad Rzeszutek Wilk  * Permission is hereby granted, free of charge, to any person obtaining a copy
9dfc07b13SKonrad Rzeszutek Wilk  * of this source file (the "Software"), to deal in the Software without
10dfc07b13SKonrad Rzeszutek Wilk  * restriction, including without limitation the rights to use, copy, modify,
11dfc07b13SKonrad Rzeszutek Wilk  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12dfc07b13SKonrad Rzeszutek Wilk  * and to permit persons to whom the Software is furnished to do so, subject to
13dfc07b13SKonrad Rzeszutek Wilk  * the following conditions:
14dfc07b13SKonrad Rzeszutek Wilk  *
15dfc07b13SKonrad Rzeszutek Wilk  * The above copyright notice and this permission notice shall be included in
16dfc07b13SKonrad Rzeszutek Wilk  * all copies or substantial portions of the Software.
17dfc07b13SKonrad Rzeszutek Wilk  *
18dfc07b13SKonrad Rzeszutek Wilk  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19dfc07b13SKonrad Rzeszutek Wilk  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20dfc07b13SKonrad Rzeszutek Wilk  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21dfc07b13SKonrad Rzeszutek Wilk  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22dfc07b13SKonrad Rzeszutek Wilk  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23dfc07b13SKonrad Rzeszutek Wilk  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24dfc07b13SKonrad Rzeszutek Wilk  * IN THE SOFTWARE.
25dfc07b13SKonrad Rzeszutek Wilk  */
26dfc07b13SKonrad Rzeszutek Wilk 
275a577e38SKonrad Rzeszutek Wilk #ifndef __XEN_BLKIF__BACKEND__COMMON_H__
285a577e38SKonrad Rzeszutek Wilk #define __XEN_BLKIF__BACKEND__COMMON_H__
29dfc07b13SKonrad Rzeszutek Wilk 
30dfc07b13SKonrad Rzeszutek Wilk #include <linux/module.h>
31dfc07b13SKonrad Rzeszutek Wilk #include <linux/interrupt.h>
32dfc07b13SKonrad Rzeszutek Wilk #include <linux/slab.h>
33dfc07b13SKonrad Rzeszutek Wilk #include <linux/blkdev.h>
34dfc07b13SKonrad Rzeszutek Wilk #include <linux/vmalloc.h>
35dfc07b13SKonrad Rzeszutek Wilk #include <linux/wait.h>
36dfc07b13SKonrad Rzeszutek Wilk #include <linux/io.h>
370a8704a5SRoger Pau Monne #include <linux/rbtree.h>
38dfc07b13SKonrad Rzeszutek Wilk #include <asm/setup.h>
39dfc07b13SKonrad Rzeszutek Wilk #include <asm/hypervisor.h>
40dfc07b13SKonrad Rzeszutek Wilk #include <xen/grant_table.h>
4167de5dfbSJulien Grall #include <xen/page.h>
42dfc07b13SKonrad Rzeszutek Wilk #include <xen/xenbus.h>
43452a6b2bSKonrad Rzeszutek Wilk #include <xen/interface/io/ring.h>
44452a6b2bSKonrad Rzeszutek Wilk #include <xen/interface/io/blkif.h>
45452a6b2bSKonrad Rzeszutek Wilk #include <xen/interface/io/protocols.h>
46dfc07b13SKonrad Rzeszutek Wilk 
extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment.
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

/* Number of segment descriptors that fit in one Xen page. */
#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
/* Number of guest-page-sized segments one indirect frame can describe. */
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

/* Indirect frames needed to describe MAX_INDIRECT_SEGMENTS segments. */
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
/* Indirect frames needed for _segs segments counted in Xen pages. */
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
69402b27f9SRoger Pau Monne 
/*
 * Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.
 */
struct blkif_common_request {
	char dummy;	/* placeholder only; never put on a real ring */
};
77089bc014SJan Beulich 
78089bc014SJan Beulich /* i386 protocol version */
79452a6b2bSKonrad Rzeszutek Wilk 
/*
 * i386 layout of a read/write request.  __packed so no 64-bit alignment
 * padding is inserted and the layout matches a 32-bit frontend exactly.
 */
struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
8797e36834SKonrad Rzeszutek Wilk 
/* i386 layout of a discard request. */
struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;   /* number of contiguous sectors to discard */
} __attribute__((__packed__));
9597e36834SKonrad Rzeszutek Wilk 
/*
 * i386 layout of any other/unknown operation: just enough of the frame
 * to recover the "id" so an error response can be sent back.
 */
struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
1010e367ae4SDavid Vrabel 
/* i386 layout of an indirect request. */
struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;  /* BLKIF_OP_* carried out indirectly    */
	uint16_t       nr_segments;  /* number of indirect segments          */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));
119402b27f9SRoger Pau Monne 
/* i386 request frame: "operation" selects which union member is valid. */
struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));
12997e36834SKonrad Rzeszutek Wilk 
130452a6b2bSKonrad Rzeszutek Wilk /* x86_64 protocol version */
131b3cb0d6aSLi Dongyang 
/*
 * x86_64 layout of a read/write request.  Explicit _pad1 keeps "id" at
 * offset 8, where a 64-bit frontend places it.
 */
struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_reqest..,u.rw.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
140b3cb0d6aSLi Dongyang 
141b3cb0d6aSLi Dongyang struct blkif_x86_64_request_discard {
1425ea42986SKonrad Rzeszutek Wilk 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
14397e36834SKonrad Rzeszutek Wilk 	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
14497e36834SKonrad Rzeszutek Wilk         uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
14597e36834SKonrad Rzeszutek Wilk 	uint64_t       id;
146b3cb0d6aSLi Dongyang 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
147b3cb0d6aSLi Dongyang 	uint64_t       nr_sectors;
14897e36834SKonrad Rzeszutek Wilk } __attribute__((__packed__));
149b3cb0d6aSLi Dongyang 
/*
 * x86_64 layout of any other/unknown operation: just enough of the frame
 * to recover the "id" so an error response can be sent back.
 */
struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
1560e367ae4SDavid Vrabel 
/* x86_64 layout of an indirect request. */
struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;  /* BLKIF_OP_* carried out indirectly    */
	uint16_t       nr_segments;  /* number of indirect segments          */
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));
175402b27f9SRoger Pau Monne 
/* x86_64 request frame: "operation" selects which union member is valid. */
struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));
18597e36834SKonrad Rzeszutek Wilk 
/*
 * Generate shared-ring types for each wire protocol.  The x86_32 response
 * is marked __packed so no alignment padding diverges from the i386
 * frontend's view of the ring.
 */
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);
192452a6b2bSKonrad Rzeszutek Wilk 
/*
 * One back-ring object per ring; which member is live depends on the
 * negotiated blkif_protocol below.
 */
union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};
199452a6b2bSKonrad Rzeszutek Wilk 
/* Wire protocol (ring frame layout) negotiated with the frontend. */
enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,	/* frontend ABI matches the backend */
	BLKIF_PROTOCOL_X86_32 = 2,	/* 32-bit x86 frontend layout */
	BLKIF_PROTOCOL_X86_64 = 3,	/* 64-bit x86 frontend layout */
};
205452a6b2bSKonrad Rzeszutek Wilk 
/*
 * Default protocol if the frontend doesn't specify one.
 *
 * NOTE(review): on x86 the default is the 32-bit layout, presumably for
 * legacy frontends that predate protocol negotiation — confirm against
 * the xenbus connect path before relying on this.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
214b042a3caSDavid Vrabel 
/* Backend view of one virtual block device exported to a domain. */
struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	/* Backing device supports cache flush requests. */
	unsigned int		flush_support:1;
	/* Backing device supports secure discard. */
	unsigned int		discard_secure:1;
	/* Connect-time cached feature_persistent parameter value */
	unsigned int		feature_gnt_persistent_parm:1;
	/* Persistent grants feature negotiation result */
	unsigned int		feature_gnt_persistent:1;
	/* Set once the persistent-grant limit has been exceeded. */
	unsigned int		overflow_max_grants:1;
};
235dfc07b13SKonrad Rzeszutek Wilk 
236dfc07b13SKonrad Rzeszutek Wilk struct backend_info;
237dfc07b13SKonrad Rzeszutek Wilk 
238bf0720c4SRoger Pau Monne /* Number of requests that we can fit in a ring */
23969b91edeSBob Liu #define XEN_BLKIF_REQS_PER_PAGE		32
240bf0720c4SRoger Pau Monne 
/* One persistently-mapped grant, kept in a per-ring rbtree keyed by gnt. */
struct persistent_gnt {
	struct page *page;		/* local page backing the mapping */
	grant_ref_t gnt;		/* frontend's grant reference */
	grant_handle_t handle;		/* handle returned by the map op */
	unsigned long last_used;	/* for LRU aging by the purge logic */
	bool active;			/* currently in use by a request */
	struct rb_node node;		/* linkage in ring->persistent_gnts */
	struct list_head remove_node;	/* linkage in persistent_purge_list */
};
2500a8704a5SRoger Pau Monne 
/*
 * Per-ring information.  A multi-queue capable blkif has nr_rings of
 * these, each with its own I/O ring, kthread and grant/page caches.
 */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* Private fields. */
	spinlock_t		blk_ring_lock;

	wait_queue_head_t	wq;
	/* Number of requests currently being processed. */
	atomic_t		inflight;
	bool			active;
	/* One thread per blkif ring. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* Tree to store persistent grants. */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	/* Next time the LRU purge should run (jiffies). */
	unsigned long           next_lru;

	/* Statistics. */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	/* Used by the kworker that offload work from the persistent purge. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	struct gnttab_page_cache free_pages;

	/* Deferred teardown of this ring's resources. */
	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	/* Back pointer to the owning device. */
	struct xen_blkif	*blkif;
};
30159795700SBob Liu 
/* Per-device state: one of these per frontend-visible block interface. */
struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Reference count; last xen_blkif_put() schedules free_work. */
	atomic_t		refcnt;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;

	struct work_struct	free_work;
	unsigned int		nr_ring_pages;
	bool			multi_ref;
	/* All rings for this device. */
	struct xen_blkif_ring	*rings;
	unsigned int		nr_rings;
	unsigned long		buffer_squeeze_end;
};
325dfc07b13SKonrad Rzeszutek Wilk 
/* Offset/length (in sectors) of one segment within its page. */
struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};
330402b27f9SRoger Pau Monne 
/* One granted page of a request: either a fresh map or a persistent grant. */
struct grant_page {
	struct page		*page;		/* local backing page */
	struct persistent_gnt	*persistent_gnt; /* non-NULL if persistent */
	grant_handle_t		handle;		/* map handle (non-persistent) */
	grant_ref_t		gref;		/* frontend grant reference */
};
337bb642e83SRoger Pau Monne 
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring   *ring;	/* ring this request arrived on */
	u64			id;	/* guest id, echoed in the response */
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;	/* linkage in ring->pending_free */
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};
361bf0720c4SRoger Pau Monne 
36242c7841dSKonrad Rzeszutek Wilk 
/* Size of a vbd's backing block device, in 512-byte sectors. */
#define vbd_sz(_v)	bdev_nr_sectors((_v)->bdev)
364dfc07b13SKonrad Rzeszutek Wilk 
/* Take a reference on a xen_blkif. */
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
/*
 * Drop a reference; the final put defers destruction to the blkif's
 * free_work on the workqueue rather than freeing inline.
 */
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
371dfc07b13SKonrad Rzeszutek Wilk 
/* Physical request description: target block device plus sector range. */
struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};
37814855954SPaul Durrant 
3798b6bf747SKonrad Rzeszutek Wilk int xen_blkif_interface_init(void);
38014855954SPaul Durrant void xen_blkif_interface_fini(void);
381dfc07b13SKonrad Rzeszutek Wilk 
3828b6bf747SKonrad Rzeszutek Wilk int xen_blkif_xenbus_init(void);
38314855954SPaul Durrant void xen_blkif_xenbus_fini(void);
384dfc07b13SKonrad Rzeszutek Wilk 
3858b6bf747SKonrad Rzeszutek Wilk irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
3868b6bf747SKonrad Rzeszutek Wilk int xen_blkif_schedule(void *arg);
38759795700SBob Liu void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
388dfc07b13SKonrad Rzeszutek Wilk 
38924f567f9SKonrad Rzeszutek Wilk int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
390dfc07b13SKonrad Rzeszutek Wilk 			      struct backend_info *be, int state);
391dfc07b13SKonrad Rzeszutek Wilk 
39229bde093SKonrad Rzeszutek Wilk int xen_blkbk_barrier(struct xenbus_transaction xbt,
39329bde093SKonrad Rzeszutek Wilk 		      struct backend_info *be, int state);
3948b6bf747SKonrad Rzeszutek Wilk struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
395abb97b8cSRoger Pau Monne void xen_blkbk_unmap_purged_grants(struct work_struct *work);
396dfc07b13SKonrad Rzeszutek Wilk 
3975a577e38SKonrad Rzeszutek Wilk #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
398