/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

/**
 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone.
 * @sdkp: The disk the report originated from
 * @buf: Address of the report zone descriptor
 * @zone: The destination zone structure
 *
 * All LBA-sized values are converted to 512B sector units.
 */
static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
				struct blk_zone *zone)
{
	struct scsi_device *sdp = sdkp->device;

	memset(zone, 0, sizeof(struct blk_zone));

	zone->type = buf[0] & 0x0f;
	zone->cond = (buf[1] >> 4) & 0xf;
	if (buf[1] & 0x01)
		zone->reset = 1;
	if (buf[1] & 0x02)
		zone->non_seq = 1;

	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
	if (zone->type != ZBC_ZONE_TYPE_CONV &&
	    zone->cond == ZBC_ZONE_COND_FULL)
		zone->wp = zone->start + zone->len;
}

/**
 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: Buffer to use for the reply
 * @buflen: The buffer size
 * @lba: Start LBA of the report
 * @partial: Do a partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible matching
 * zones and will only report the number of zones that fit in the command
 * reply buffer.
 */
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
			       unsigned int buflen, sector_t lba,
			       bool partial)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	if (partial)
		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
	memset(buf, 0, buflen);

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES lba %llu failed with %d/%d\n",
			  (unsigned long long)lba,
			  host_byte(result), driver_byte(result));
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}

/**
 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
 * @cmd: The command to set up
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
 */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t lba, sector = blk_rq_pos(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);
	int ret;

	WARN_ON(nr_bytes == 0);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	ret = scsi_init_io(cmd);
	if (ret != BLKPREP_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_IN;
	cmd->cmnd[1] = ZI_REPORT_ZONES;
	lba = sectors_to_logical(sdkp->device, sector);
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
	/* Do partial report for speeding things up */
	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;

	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->sdb.length = nr_bytes;
	cmd->transfersize = sdkp->device->sector_size;
	cmd->allowed = 0;

	return BLKPREP_OK;
}

/**
 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
 * @scmd: The completed report zones command
 * @good_bytes: reply size in bytes
 *
 * Convert all reported zone descriptors to struct blk_zone. The conversion
 * is done in-place, directly in the sg buffer of the request.
 */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

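	/* The report header and each zone descriptor are 64 bytes long */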
	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					 (good_bytes - 64) / 64,
					 get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;

	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}

/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}

/**
 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
 * @cmd: The command to set up
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
 */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);
	sector_t block = sectors_to_logical(sdkp->device, sector);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	if (sdkp->device->changed)
		return BLKPREP_KILL;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLKPREP_KILL;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
	put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;
	cmd->allowed = 0;

	return BLKPREP_OK;
}

/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: Command sense header
 *
 * Called from sd_done(). Process the report zones command reply and handle
 * reset zone and write command errors.
 */
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
		     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = cmd->request;

	switch (req_op(rq)) {
	case REQ_OP_ZONE_RESET:

		if (result &&
		    sshdr->sense_key == ILLEGAL_REQUEST &&
		    sshdr->asc == 0x24)
			/*
			 * INVALID FIELD IN CDB error: reset of a conventional
			 * zone was attempted. Nothing to worry about, so be
			 * quiet about the error.
			 */
			rq->rq_flags |= RQF_QUIET;
		break;

	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		break;

	case REQ_OP_ZONE_REPORT:

		if (!result)
			sd_zbc_report_zones_complete(cmd, good_bytes);
		break;

	}
}

/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer in which to store the VPD page data
 *
 * Read VPD page B6h, gather the zoned block device characteristics and check
 * that reads are unconstrained.
 */
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
					      unsigned char *buf)
{

	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Read zoned characteristics VPD page failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
			  "Devices with constrained reads are not supported\n");
		return -ENODEV;
	}

	return 0;
}

#define SD_ZBC_BUF_SIZE 131072U

/**
 * sd_zbc_check_zones - Check the device capacity and zone sizes
 * @sdkp: Target disk
 * @zblocks: Returned zone size in number of logical blocks
 *
 * Check that the device capacity as reported by READ CAPACITY matches the
 * max_lba value (plus one) of the report zones command reply. Also check that
 * all zones of the device have an equal size, only allowing the last zone of
 * the disk to have a smaller size (runt zone). The zone size must also be a
 * power of two.
 *
 * Returns 0 upon success with the zone size stored in @zblocks, or an error
 * code upon failure.
 */
static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
{
	u64 zone_blocks = 0;
	sector_t max_lba, block = 0;
	unsigned char *buf;
	unsigned char *rec;
	unsigned int buf_len;
	unsigned int list_length;
	int ret;
	u8 same;

	/* Get a buffer */
	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Do a report zone to get max_lba and the same field */
	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
	if (ret)
		goto out_free;

	if (sdkp->rc_basis == 0) {
		/* The max_lba field is the capacity of this device */
		max_lba = get_unaligned_be64(&buf[8]);
		if (sdkp->capacity != max_lba + 1) {
			if (sdkp->first_scan)
				sd_printk(KERN_WARNING, sdkp,
					"Changing capacity from %llu to max LBA+1 %llu\n",
					(unsigned long long)sdkp->capacity,
					(unsigned long long)max_lba + 1);
			sdkp->capacity = max_lba + 1;
		}
	}

	/*
	 * Check same field: for any value other than 0, we know that all zones
	 * have the same size.
	 */
	same = buf[4] & 0x0f;
	if (same > 0) {
		rec = &buf[64];
		zone_blocks = get_unaligned_be64(&rec[8]);
		goto out;
	}

	/*
	 * Check the size of all zones: all zones must be of
	 * equal size, except the last zone which can be smaller
	 * than other zones.
	 */
	do {

		/* Parse REPORT ZONES header */
		list_length = get_unaligned_be32(&buf[0]) + 64;
		rec = buf + 64;
		buf_len = min(list_length, SD_ZBC_BUF_SIZE);

		/* Parse zone descriptors */
		while (rec < buf + buf_len) {
			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);

			if (zone_blocks == 0) {
				zone_blocks = this_zone_blocks;
			} else if (this_zone_blocks != zone_blocks &&
				   (block + this_zone_blocks < sdkp->capacity
				    || this_zone_blocks > zone_blocks)) {
				zone_blocks = 0;
				goto out;
			}
			block += this_zone_blocks;
			rec += 64;
		}

		if (block < sdkp->capacity) {
			ret = sd_zbc_report_zones(sdkp, buf,
						  SD_ZBC_BUF_SIZE, block, true);
			if (ret)
				goto out_free;
		}

	} while (block < sdkp->capacity);

out:
	if (!zone_blocks) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non-constant zone size are not supported\n");
		ret = -ENODEV;
	} else if (!is_power_of_2(zone_blocks)) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non power-of-2 zone size are not supported\n");
		ret = -ENODEV;
	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Zone size too large\n");
		ret = -EFBIG;
	} else {
		*zblocks = zone_blocks;
		ret = 0;
	}

out_free:
	kfree(buf);

	return ret;
}

/**
 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
 * @nr_zones: Number of zones to allocate space for.
 * @numa_node: NUMA node to allocate the memory from.
 */
static inline unsigned long *
sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_KERNEL, numa_node);
}

/**
 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
 * @sdkp: disk used
 * @buf: report reply buffer
 * @buflen: length of @buf
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @seq_zones_bitmap: bitmap of sequential zones to set
 *
 * Parse reported zone descriptors in @buf to identify sequential zones and
 * set the reported zone bit in @seq_zones_bitmap accordingly.
 * Since read-only and offline zones cannot be written, do not
 * mark them as sequential in the bitmap.
 * Return the LBA after the last zone reported.
 */
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
				     unsigned int buflen, u32 zone_shift,
				     unsigned long *seq_zones_bitmap)
{
	sector_t lba, next_lba = sdkp->capacity;
	unsigned int buf_len, list_length;
	unsigned char *rec;
	u8 type, cond;

	list_length = get_unaligned_be32(&buf[0]) + 64;
	buf_len = min(list_length, buflen);
	rec = buf + 64;

	while (rec < buf + buf_len) {
		type = rec[0] & 0x0f;
		cond = (rec[1] >> 4) & 0xf;
		lba = get_unaligned_be64(&rec[16]);
		if (type != ZBC_ZONE_TYPE_CONV &&
		    cond != ZBC_ZONE_COND_READONLY &&
		    cond != ZBC_ZONE_COND_OFFLINE)
			set_bit(lba >> zone_shift, seq_zones_bitmap);
		next_lba = lba + get_unaligned_be64(&rec[8]);
		rec += 64;
	}

	return next_lba;
}

/**
 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
 * @sdkp: target disk
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @nr_zones: number of zones to set up a seq zone bitmap for
 *
 * Allocate a zone bitmap and initialize it by identifying sequential zones.
 */
static unsigned long *
sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
			      u32 nr_zones)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned long *seq_zones_bitmap;
	sector_t lba = 0;
	unsigned char *buf;
	int ret = -ENOMEM;

	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
	if (!seq_zones_bitmap)
		return ERR_PTR(-ENOMEM);

	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	while (lba < sdkp->capacity) {
		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
					  lba, true);
		if (ret)
			goto out;
		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
					   zone_shift, seq_zones_bitmap);
	}

	if (lba != sdkp->capacity) {
		/* Something went wrong */
		ret = -EIO;
	}

out:
	kfree(buf);
	if (ret) {
		kfree(seq_zones_bitmap);
		return ERR_PTR(ret);
	}
	return seq_zones_bitmap;
}

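/**
 * sd_zbc_cleanup - Free zone information resources.
 * @sdkp: Target disk
 *
 * Free the sequential zone and zone write lock bitmaps attached to the disk
 * request queue and clear the queue zone count.
 */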
static void sd_zbc_cleanup(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;

	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;

	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;

	q->nr_zones = 0;
}

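/**
 * sd_zbc_setup - Set up the disk and request queue zone information.
 * @sdkp: Target disk
 * @zone_blocks: Zone size in number of logical blocks
 *
 * Set the queue chunk_sectors to the zone size and, if the number of zones
 * changed, allocate and install the zone write lock and sequential zone
 * bitmaps used by the block layer.
 */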
static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
	struct request_queue *q = sdkp->disk->queue;
	u32 zone_shift = ilog2(zone_blocks);
	u32 nr_zones;
	int ret;

	/* chunk_sectors indicates the zone size */
	blk_queue_chunk_sectors(q,
			logical_to_sectors(sdkp->device, zone_blocks));
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;

	/*
	 * Initialize the device request queue information if the number
	 * of zones changed.
	 */
	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
		size_t zone_bitmap_size;

		if (nr_zones) {
			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
								   q->node);
			if (!seq_zones_wlock) {
				ret = -ENOMEM;
				goto err;
			}

			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
							zone_shift, nr_zones);
			if (IS_ERR(seq_zones_bitmap)) {
				ret = PTR_ERR(seq_zones_bitmap);
				kfree(seq_zones_wlock);
				goto err;
			}
		}
		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
			sizeof(unsigned long);
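		/*
		 * Install the new zone information with the queue frozen so
		 * that no command is in flight while the zone write lock and
		 * sequential zone bitmaps are being replaced.
		 */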
		blk_mq_freeze_queue(q);
		if (q->nr_zones != nr_zones) {
			/* READ16/WRITE16 is mandatory for ZBC disks */
			sdkp->device->use_16_for_rw = 1;
			sdkp->device->use_10_for_rw = 0;

			sdkp->zone_blocks = zone_blocks;
			sdkp->zone_shift = zone_shift;
			sdkp->nr_zones = nr_zones;
			q->nr_zones = nr_zones;
			swap(q->seq_zones_wlock, seq_zones_wlock);
			swap(q->seq_zones_bitmap, seq_zones_bitmap);
		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
				  zone_bitmap_size) != 0) {
			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
			       zone_bitmap_size);
		}
		blk_mq_unfreeze_queue(q);
		kfree(seq_zones_wlock);
		kfree(seq_zones_bitmap);
	}

	return 0;

err:
	sd_zbc_cleanup(sdkp);
	return ret;
}

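/**
 * sd_zbc_read_zones - Read zone information and set up the zoned disk.
 * @sdkp: Target disk
 * @buf: Scratch buffer used for the zoned characteristics VPD page
 *
 * Check the zoned block device characteristics and zone sizes of the device
 * and, if the device is accepted, set up the disk and request queue zone
 * information. On failure the disk capacity is set to zero.
 */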
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
	u32 zone_blocks;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* Check zoned block device characteristics (unconstrained reads) */
	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check zone size: only devices with a constant zone size (except
	 * a possible last runt zone) that is a power of 2 are supported.
	 */
	ret = sd_zbc_check_zones(sdkp, &zone_blocks);
	if (ret != 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	ret = sd_zbc_setup(sdkp, zone_blocks);
	if (ret)
		goto err;

	return 0;

err:
	sdkp->capacity = 0;
	sd_zbc_cleanup(sdkp);

	return ret;
}

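/**
 * sd_zbc_remove - Free zone information when the disk is removed.
 * @sdkp: Target disk
 */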
void sd_zbc_remove(struct scsi_disk *sdkp)
{
	sd_zbc_cleanup(sdkp);
}

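/**
 * sd_zbc_print_zones - Print the disk zone configuration.
 * @sdkp: Target disk
 *
 * Log the number of zones and the zone size in logical blocks, noting the
 * last runt zone if the capacity is not a multiple of the zone size.
 */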
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->nr_zones - 1,
			  sdkp->zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->nr_zones,
			  sdkp->zone_blocks);
}