xref: /openbmc/linux/drivers/scsi/sd_zbc.c (revision 2d972b6a)
1 /*
2  * SCSI Zoned Block commands
3  *
4  * Copyright (C) 2014-2015 SUSE Linux GmbH
5  * Written by: Hannes Reinecke <hare@suse.de>
6  * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
7  * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version
11  * 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; see the file COPYING.  If not, write to
20  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
21  * USA.
22  *
23  */
24 
25 #include <linux/blkdev.h>
26 
27 #include <asm/unaligned.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h>
31 
32 #include "sd.h"
33 
34 /**
35  * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone.
36  * @sdkp: The disk the report originated from
37  * @buf: Address of the report zone descriptor
38  * @zone: the destination zone structure
39  *
40  * All LBA-sized values are converted to 512B sector units.
41  */
42 static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
43 				struct blk_zone *zone)
44 {
45 	struct scsi_device *sdp = sdkp->device;
46 
47 	memset(zone, 0, sizeof(struct blk_zone));
48 
49 	zone->type = buf[0] & 0x0f;
50 	zone->cond = (buf[1] >> 4) & 0xf;
51 	if (buf[1] & 0x01)
52 		zone->reset = 1;
53 	if (buf[1] & 0x02)
54 		zone->non_seq = 1;
55 
56 	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
57 	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
58 	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
59 	if (zone->type != ZBC_ZONE_TYPE_CONV &&
60 	    zone->cond == ZBC_ZONE_COND_FULL)
61 		zone->wp = zone->start + zone->len;
62 }
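
/*
 * For reference, the zone descriptor layout assumed by sd_zbc_parse_report()
 * above: byte 0 carries the zone type, byte 1 the zone condition together
 * with the reset and non_seq flags, and the 64-bit big-endian fields at
 * offsets 8, 16 and 24 carry the zone length, zone start LBA and write
 * pointer LBA, respectively.
 */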
63 
64 /**
65  * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
66  * @sdkp: The target disk
67  * @buf: Buffer to use for the reply
68  * @buflen: the buffer size
69  * @lba: Start LBA of the report
70  *
71  * For internal use during device validation.
72  */
73 static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
74 			       unsigned int buflen, sector_t lba)
75 {
76 	struct scsi_device *sdp = sdkp->device;
77 	const int timeout = sdp->request_queue->rq_timeout;
78 	struct scsi_sense_hdr sshdr;
79 	unsigned char cmd[16];
80 	unsigned int rep_len;
81 	int result;
82 
83 	memset(cmd, 0, 16);
84 	cmd[0] = ZBC_IN;
85 	cmd[1] = ZI_REPORT_ZONES;
86 	put_unaligned_be64(lba, &cmd[2]);
87 	put_unaligned_be32(buflen, &cmd[10]);
88 	memset(buf, 0, buflen);
89 
90 	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
91 				  buf, buflen, &sshdr,
92 				  timeout, SD_MAX_RETRIES, NULL);
93 	if (result) {
94 		sd_printk(KERN_ERR, sdkp,
95 			  "REPORT ZONES lba %llu failed with %d/%d\n",
96 			  (unsigned long long)lba,
97 			  host_byte(result), driver_byte(result));
98 		return -EIO;
99 	}
100 
101 	rep_len = get_unaligned_be32(&buf[0]);
102 	if (rep_len < 64) {
103 		sd_printk(KERN_ERR, sdkp,
104 			  "REPORT ZONES report invalid length %u\n",
105 			  rep_len);
106 		return -EIO;
107 	}
108 
109 	return 0;
110 }
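
/*
 * The CDB built above follows the ZBC IN / REPORT ZONES format: the service
 * action in byte 1, the 64-bit starting LBA in bytes 2-9 and the 32-bit
 * allocation length in bytes 10-13, all big-endian. The 32-bit value at the
 * start of the reply is the zone list length, which must cover at least one
 * 64-byte descriptor for the report to be accepted here.
 */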
111 
112 /**
113  * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
114  * @cmd: The command to setup
115  *
116  * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
117  */
118 int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
119 {
120 	struct request *rq = cmd->request;
121 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
122 	sector_t lba, sector = blk_rq_pos(rq);
123 	unsigned int nr_bytes = blk_rq_bytes(rq);
124 	int ret;
125 
126 	WARN_ON(nr_bytes == 0);
127 
128 	if (!sd_is_zoned(sdkp))
129 		/* Not a zoned device */
130 		return BLKPREP_KILL;
131 
132 	ret = scsi_init_io(cmd);
133 	if (ret != BLKPREP_OK)
134 		return ret;
135 
136 	cmd->cmd_len = 16;
137 	memset(cmd->cmnd, 0, cmd->cmd_len);
138 	cmd->cmnd[0] = ZBC_IN;
139 	cmd->cmnd[1] = ZI_REPORT_ZONES;
140 	lba = sectors_to_logical(sdkp->device, sector);
141 	put_unaligned_be64(lba, &cmd->cmnd[2]);
142 	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
143 	/* Do a partial report to speed things up */
144 	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
145 
146 	cmd->sc_data_direction = DMA_FROM_DEVICE;
147 	cmd->sdb.length = nr_bytes;
148 	cmd->transfersize = sdkp->device->sector_size;
149 	cmd->allowed = 0;
150 
151 	/*
152 	 * The report may return fewer bytes than requested. Make sure
153 	 * to report completion on the entire initial request.
154 	 */
155 	rq->__data_len = nr_bytes;
156 
157 	return BLKPREP_OK;
158 }
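
/*
 * A note on the ZBC_REPORT_ZONE_PARTIAL bit set in byte 14 above: it allows
 * the device to account in the reply header only for the zones that fit in
 * the allocation length instead of counting all remaining zones, which is
 * what makes the partial report cheaper (this is the ZBC-defined meaning of
 * the bit; the code here merely sets it).
 */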
159 
160 /**
161  * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
162  * @scmd: The completed report zones command
163  * @good_bytes: reply size in bytes
164  *
165  * Convert all reported zone descriptors to struct blk_zone. The conversion
166  * is done in-place, directly in the sg buffer of the request.
167  */
168 static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
169 					 unsigned int good_bytes)
170 {
171 	struct request *rq = scmd->request;
172 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
173 	struct sg_mapping_iter miter;
174 	struct blk_zone_report_hdr hdr;
175 	struct blk_zone zone;
176 	unsigned int offset, bytes = 0;
177 	unsigned long flags;
178 	u8 *buf;
179 
180 	if (good_bytes < 64)
181 		return;
182 
183 	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
184 
185 	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
186 		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
187 
188 	local_irq_save(flags);
189 	while (sg_miter_next(&miter) && bytes < good_bytes) {
190 
191 		buf = miter.addr;
192 		offset = 0;
193 
194 		if (bytes == 0) {
195 			/* Set the report header */
196 			hdr.nr_zones = min_t(unsigned int,
197 					 (good_bytes - 64) / 64,
198 					 get_unaligned_be32(&buf[0]) / 64);
199 			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
200 			offset += 64;
201 			bytes += 64;
202 		}
203 
204 		/* Parse zone descriptors */
205 		while (offset < miter.length && hdr.nr_zones) {
206 			WARN_ON(offset > miter.length);
207 			buf = miter.addr + offset;
208 			sd_zbc_parse_report(sdkp, buf, &zone);
209 			memcpy(buf, &zone, sizeof(struct blk_zone));
210 			offset += 64;
211 			bytes += 64;
212 			hdr.nr_zones--;
213 		}
214 
215 		if (!hdr.nr_zones)
216 			break;
217 
218 	}
219 	sg_miter_stop(&miter);
220 	local_irq_restore(flags);
221 }
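
/*
 * The in-place conversion above relies on the REPORT ZONES header and each
 * zone descriptor both being 64 bytes: the header slot is overwritten with a
 * struct blk_zone_report_hdr and every descriptor slot with a struct
 * blk_zone, both of which are assumed to fit in those 64 bytes.
 */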
222 
223 /**
224  * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
225  * @sdkp: The target disk
226  */
227 static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
228 {
229 	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
230 }
231 
232 /**
233  * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
234  * @cmd: the command to setup
235  *
236  * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
237  */
238 int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
239 {
240 	struct request *rq = cmd->request;
241 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
242 	sector_t sector = blk_rq_pos(rq);
243 	sector_t block = sectors_to_logical(sdkp->device, sector);
244 
245 	if (!sd_is_zoned(sdkp))
246 		/* Not a zoned device */
247 		return BLKPREP_KILL;
248 
249 	if (sdkp->device->changed)
250 		return BLKPREP_KILL;
251 
252 	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
253 		/* Unaligned request */
254 		return BLKPREP_KILL;
255 
256 	cmd->cmd_len = 16;
257 	memset(cmd->cmnd, 0, cmd->cmd_len);
258 	cmd->cmnd[0] = ZBC_OUT;
259 	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
260 	put_unaligned_be64(block, &cmd->cmnd[2]);
261 
262 	rq->timeout = SD_TIMEOUT;
263 	cmd->sc_data_direction = DMA_NONE;
264 	cmd->transfersize = 0;
265 	cmd->allowed = 0;
266 
267 	return BLKPREP_OK;
268 }
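
/*
 * The zone alignment test above, sector & (sd_zbc_zone_sectors(sdkp) - 1),
 * is only valid because the zone size is known to be a power of two:
 * devices with a non power-of-2 zone size are rejected by
 * sd_zbc_check_zone_size().
 */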
269 
270 /**
271  * sd_zbc_complete - ZBC command post processing.
272  * @cmd: Completed command
273  * @good_bytes: Command reply bytes
274  * @sshdr: command sense header
275  *
276  * Called from sd_done(). Process report zones reply and handle reset zone
277  * and write command errors.
278  */
279 void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
280 		     struct scsi_sense_hdr *sshdr)
281 {
282 	int result = cmd->result;
283 	struct request *rq = cmd->request;
284 
285 	switch (req_op(rq)) {
286 	case REQ_OP_ZONE_RESET:
287 
288 		if (result &&
289 		    sshdr->sense_key == ILLEGAL_REQUEST &&
290 		    sshdr->asc == 0x24)
291 			/*
292 			 * INVALID FIELD IN CDB error: reset of a conventional
293 			 * zone was attempted. Nothing to worry about, so be
294 			 * quiet about the error.
295 			 */
296 			rq->rq_flags |= RQF_QUIET;
297 		break;
298 
299 	case REQ_OP_WRITE:
300 	case REQ_OP_WRITE_ZEROES:
301 	case REQ_OP_WRITE_SAME:
302 
303 		if (result &&
304 		    sshdr->sense_key == ILLEGAL_REQUEST &&
305 		    sshdr->asc == 0x21)
306 			/*
307 			 * INVALID ADDRESS FOR WRITE error: It is unlikely that
308 			 * retrying write requests that failed with any kind of
309 			 * alignment error will result in success. So don't.
310 			 */
311 			cmd->allowed = 0;
312 		break;
313 
314 	case REQ_OP_ZONE_REPORT:
315 
316 		if (!result)
317 			sd_zbc_report_zones_complete(cmd, good_bytes);
318 		break;
319 
320 	}
321 }
322 
323 /**
324  * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
325  * @sdkp: Target disk
326  * @buf: Buffer where to store the VPD page data
327  *
328  * Read VPD page B6.
329  */
330 static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
331 					     unsigned char *buf)
332 {
333 
334 	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
335 		sd_printk(KERN_NOTICE, sdkp,
336 			  "Unconstrained-read check failed\n");
337 		return -ENODEV;
338 	}
339 
340 	if (sdkp->device->type != TYPE_ZBC) {
341 		/* Host-aware */
342 		sdkp->urswrz = 1;
343 		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
344 		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
345 		sdkp->zones_max_open = 0;
346 	} else {
347 		/* Host-managed */
348 		sdkp->urswrz = buf[4] & 1;
349 		sdkp->zones_optimal_open = 0;
350 		sdkp->zones_optimal_nonseq = 0;
351 		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
352 	}
353 
354 	return 0;
355 }
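
/*
 * The offsets used above correspond to the Zoned Block Device
 * Characteristics VPD page (B6h): bit 0 of byte 4 is the URSWRZ flag
 * (unrestricted read in sequential write required zones), and the 32-bit
 * fields at offsets 8, 12 and 16 give the optimal number of open zones, the
 * optimal number of non-sequentially written zones and the maximum number
 * of open zones.
 */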
356 
357 /**
358  * sd_zbc_check_capacity - Check reported capacity.
359  * @sdkp: Target disk
360  * @buf: Buffer to use for commands
361  *
362  * A ZBC drive may report only the capacity of its first conventional zones at
363  * LBA 0. This is indicated by the RC_BASIS field of the READ CAPACITY reply.
364  * Check this here. If the disk reported only its conventional zones capacity,
365  * get the total capacity by issuing a REPORT ZONES command.
366  */
367 static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
368 {
369 	sector_t lba;
370 	int ret;
371 
372 	if (sdkp->rc_basis != 0)
373 		return 0;
374 
375 	/* Do a REPORT ZONES to get the maximum LBA and check the capacity */
376 	ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
377 	if (ret)
378 		return ret;
379 
380 	/* The max_lba field gives the last LBA of the device (capacity - 1) */
381 	lba = get_unaligned_be64(&buf[8]);
382 	if (lba + 1 == sdkp->capacity)
383 		return 0;
384 
385 	if (sdkp->first_scan)
386 		sd_printk(KERN_WARNING, sdkp,
387 			  "Changing capacity from %llu to max LBA+1 %llu\n",
388 			  (unsigned long long)sdkp->capacity,
389 			  (unsigned long long)lba + 1);
390 	sdkp->capacity = lba + 1;
391 
392 	return 0;
393 }
394 
395 #define SD_ZBC_BUF_SIZE 131072U
396 
397 /**
398  * sd_zbc_check_zone_size - Check the device zone sizes
399  * @sdkp: Target disk
400  *
401  * Check that all zones of the device are of equal size. The last zone can
402  * however be smaller. The zone size must be a power of two number of LBAs.
403  *
404  * Returns the zone size in logical blocks upon success or an error code upon failure.
405  */
406 static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
407 {
408 	u64 zone_blocks = 0;
409 	sector_t block = 0;
410 	unsigned char *buf;
411 	unsigned char *rec;
412 	unsigned int buf_len;
413 	unsigned int list_length;
414 	int ret;
415 	u8 same;
416 
417 	/* Get a buffer */
418 	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
419 	if (!buf)
420 		return -ENOMEM;
421 
422 	/* Do a REPORT ZONES to get the SAME field of the report header */
423 	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
424 	if (ret)
425 		goto out_free;
426 
427 	same = buf[4] & 0x0f;
428 	if (same > 0) {
429 		rec = &buf[64];
430 		zone_blocks = get_unaligned_be64(&rec[8]);
431 		goto out;
432 	}
433 
434 	/*
435 	 * Check the size of all zones: all zones must be of
436 	 * equal size, except the last zone which can be smaller
437 	 * than other zones.
438 	 */
439 	do {
440 
441 		/* Parse REPORT ZONES header */
442 		list_length = get_unaligned_be32(&buf[0]) + 64;
443 		rec = buf + 64;
444 		buf_len = min(list_length, SD_ZBC_BUF_SIZE);
445 
446 		/* Parse zone descriptors */
447 		while (rec < buf + buf_len) {
448 			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
449 
450 			if (zone_blocks == 0) {
451 				zone_blocks = this_zone_blocks;
452 			} else if (this_zone_blocks != zone_blocks &&
453 				   (block + this_zone_blocks < sdkp->capacity
454 				    || this_zone_blocks > zone_blocks)) {
455 				zone_blocks = 0;
456 				goto out;
457 			}
458 			block += this_zone_blocks;
459 			rec += 64;
460 		}
461 
462 		if (block < sdkp->capacity) {
463 			ret = sd_zbc_report_zones(sdkp, buf,
464 						  SD_ZBC_BUF_SIZE, block);
465 			if (ret)
466 				goto out_free;
467 		}
468 
469 	} while (block < sdkp->capacity);
470 
471 out:
472 	if (!zone_blocks) {
473 		if (sdkp->first_scan)
474 			sd_printk(KERN_NOTICE, sdkp,
475 				  "Devices with non-constant zone "
476 				  "size are not supported\n");
477 		ret = -ENODEV;
478 	} else if (!is_power_of_2(zone_blocks)) {
479 		if (sdkp->first_scan)
480 			sd_printk(KERN_NOTICE, sdkp,
481 				  "Devices with non-power-of-2 zone "
482 				  "size are not supported\n");
483 		ret = -ENODEV;
484 	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
485 		if (sdkp->first_scan)
486 			sd_printk(KERN_NOTICE, sdkp,
487 				  "Zone size too large\n");
488 		ret = -ENODEV;
489 	} else {
490 		ret = zone_blocks;
491 	}
492 
493 out_free:
494 	kfree(buf);
495 
496 	return ret;
497 }
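
/*
 * Worked example for the checks above (illustrative numbers only): a drive
 * with 4096-byte logical blocks and 256 MiB zones reports a zone size of
 * 65536 blocks, which is a power of two and corresponds to 524288 sectors
 * of 512B, well below the UINT_MAX limit enforced above.
 */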
498 
499 /**
500  * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
501  * @nr_zones: Number of zones to allocate space for.
502  * @numa_node: NUMA node to allocate the memory from.
503  */
504 static inline unsigned long *
505 sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
506 {
507 	return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
508 			    GFP_KERNEL, numa_node);
509 }
510 
511 /**
512  * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
513  * @sdkp: disk used
514  * @buf: report reply buffer
515  * @buflen: length of @buf
516  * @zone_shift: logarithm base 2 of the number of blocks in a zone
517  * @seq_zones_bitmap: bitmap of sequential zones to set
518  *
519  * Parse reported zone descriptors in @buf to identify sequential zones and
520  * set the reported zone bit in @seq_zones_bitmap accordingly.
521  * Since read-only and offline zones cannot be written, do not
522  * mark them as sequential in the bitmap.
523  * Return the LBA after the last zone reported.
524  */
525 static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
526 				     unsigned int buflen, u32 zone_shift,
527 				     unsigned long *seq_zones_bitmap)
528 {
529 	sector_t lba, next_lba = sdkp->capacity;
530 	unsigned int buf_len, list_length;
531 	unsigned char *rec;
532 	u8 type, cond;
533 
534 	list_length = get_unaligned_be32(&buf[0]) + 64;
535 	buf_len = min(list_length, buflen);
536 	rec = buf + 64;
537 
538 	while (rec < buf + buf_len) {
539 		type = rec[0] & 0x0f;
540 		cond = (rec[1] >> 4) & 0xf;
541 		lba = get_unaligned_be64(&rec[16]);
542 		if (type != ZBC_ZONE_TYPE_CONV &&
543 		    cond != ZBC_ZONE_COND_READONLY &&
544 		    cond != ZBC_ZONE_COND_OFFLINE)
545 			set_bit(lba >> zone_shift, seq_zones_bitmap);
546 		next_lba = lba + get_unaligned_be64(&rec[8]);
547 		rec += 64;
548 	}
549 
550 	return next_lba;
551 }
552 
553 /**
554  * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
555  * @sdkp: target disk
556  * @zone_shift: logarithm base 2 of the number of blocks in a zone
557  * @nr_zones: number of zones to set up a seq zone bitmap for
558  *
559  * Allocate a zone bitmap and initialize it by identifying sequential zones.
560  */
561 static unsigned long *
562 sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
563 			      u32 nr_zones)
564 {
565 	struct request_queue *q = sdkp->disk->queue;
566 	unsigned long *seq_zones_bitmap;
567 	sector_t lba = 0;
568 	unsigned char *buf;
569 	int ret = -ENOMEM;
570 
571 	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
572 	if (!seq_zones_bitmap)
573 		return ERR_PTR(-ENOMEM);
574 
575 	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
576 	if (!buf)
577 		goto out;
578 
579 	while (lba < sdkp->capacity) {
580 		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
581 		if (ret)
582 			goto out;
583 		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
584 					   zone_shift, seq_zones_bitmap);
585 	}
586 
587 	if (lba != sdkp->capacity) {
588 		/* Something went wrong */
589 		ret = -EIO;
590 	}
591 
592 out:
593 	kfree(buf);
594 	if (ret) {
595 		kfree(seq_zones_bitmap);
596 		return ERR_PTR(ret);
597 	}
598 	return seq_zones_bitmap;
599 }
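
/*
 * The bitmap built here, once installed as q->seq_zones_bitmap by
 * sd_zbc_setup(), is what allows the block layer to distinguish sequential
 * write required zones from conventional ones, e.g. for zone write locking
 * of write requests.
 */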
600 
601 static void sd_zbc_cleanup(struct scsi_disk *sdkp)
602 {
603 	struct request_queue *q = sdkp->disk->queue;
604 
605 	kfree(q->seq_zones_bitmap);
606 	q->seq_zones_bitmap = NULL;
607 
608 	kfree(q->seq_zones_wlock);
609 	q->seq_zones_wlock = NULL;
610 
611 	q->nr_zones = 0;
612 }
613 
614 static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
615 {
616 	struct request_queue *q = sdkp->disk->queue;
617 	u32 zone_shift = ilog2(zone_blocks);
618 	u32 nr_zones;
619 	int ret;
620 
621 	/* chunk_sectors indicates the zone size */
622 	blk_queue_chunk_sectors(q,
623 			logical_to_sectors(sdkp->device, zone_blocks));
624 	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
625 
626 	/*
627 	 * Initialize the device request queue information if the number
628 	 * of zones changed.
629 	 */
630 	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
631 		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
632 		size_t zone_bitmap_size;
633 
634 		if (nr_zones) {
635 			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
636 								   q->node);
637 			if (!seq_zones_wlock) {
638 				ret = -ENOMEM;
639 				goto err;
640 			}
641 
642 			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
643 							zone_shift, nr_zones);
644 			if (IS_ERR(seq_zones_bitmap)) {
645 				ret = PTR_ERR(seq_zones_bitmap);
646 				kfree(seq_zones_wlock);
647 				goto err;
648 			}
649 		}
650 		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
651 			sizeof(unsigned long);
652 		blk_mq_freeze_queue(q);
653 		if (q->nr_zones != nr_zones) {
654 			/* READ16/WRITE16 is mandatory for ZBC disks */
655 			sdkp->device->use_16_for_rw = 1;
656 			sdkp->device->use_10_for_rw = 0;
657 
658 			sdkp->zone_blocks = zone_blocks;
659 			sdkp->zone_shift = zone_shift;
660 			sdkp->nr_zones = nr_zones;
661 			q->nr_zones = nr_zones;
662 			swap(q->seq_zones_wlock, seq_zones_wlock);
663 			swap(q->seq_zones_bitmap, seq_zones_bitmap);
664 		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
665 				  zone_bitmap_size) != 0) {
666 			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
667 			       zone_bitmap_size);
668 		}
669 		blk_mq_unfreeze_queue(q);
670 		kfree(seq_zones_wlock);
671 		kfree(seq_zones_bitmap);
672 	}
673 
674 	return 0;
675 
676 err:
677 	sd_zbc_cleanup(sdkp);
678 	return ret;
679 }
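
/*
 * Note that the request queue is frozen above while the zone information
 * and the two bitmaps are swapped in, so that no command is in flight while
 * the zone bookkeeping exposed to the block layer changes.
 */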
680 
681 int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
682 {
683 	int64_t zone_blocks;
684 	int ret;
685 
686 	if (!sd_is_zoned(sdkp))
687 		/*
688 		 * Device-managed or normal SCSI disk,
689 		 * no special handling required
690 		 */
691 		return 0;
692 
693 	/* Get zoned block device characteristics */
694 	ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
695 	if (ret)
696 		goto err;
697 
698 	/*
699 	 * Check for unconstrained reads: host-managed devices with
700 	 * constrained reads (drives failing reads past the write pointer)
701 	 * are not supported.
702 	 */
703 	if (!sdkp->urswrz) {
704 		if (sdkp->first_scan)
705 			sd_printk(KERN_NOTICE, sdkp,
706 			  "Drives with constrained reads are not supported\n");
707 		ret = -ENODEV;
708 		goto err;
709 	}
710 
711 	/* Check capacity */
712 	ret = sd_zbc_check_capacity(sdkp, buf);
713 	if (ret)
714 		goto err;
715 
716 	/*
717 	 * Check zone size: only devices with a constant zone size (except
718 	 * a possible last runt zone) that is a power of 2 are supported.
719 	 */
720 	zone_blocks = sd_zbc_check_zone_size(sdkp);
721 	ret = -EFBIG;
722 	if (zone_blocks != (u32)zone_blocks)
723 		goto err;
724 	ret = zone_blocks;
725 	if (ret < 0)
726 		goto err;
727 
728 	/* The drive satisfies the kernel restrictions: set it up */
729 	ret = sd_zbc_setup(sdkp, zone_blocks);
730 	if (ret)
731 		goto err;
732 
733 	return 0;
734 
735 err:
736 	sdkp->capacity = 0;
737 	sd_zbc_cleanup(sdkp);
738 
739 	return ret;
740 }
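
/*
 * On any failure above, the capacity is reset to 0 so that the disk is not
 * exposed as a zoned device that the kernel cannot handle safely.
 */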
741 
742 void sd_zbc_remove(struct scsi_disk *sdkp)
743 {
744 	sd_zbc_cleanup(sdkp);
745 }
746 
747 void sd_zbc_print_zones(struct scsi_disk *sdkp)
748 {
749 	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
750 		return;
751 
752 	if (sdkp->capacity & (sdkp->zone_blocks - 1))
753 		sd_printk(KERN_NOTICE, sdkp,
754 			  "%u zones of %u logical blocks + 1 runt zone\n",
755 			  sdkp->nr_zones - 1,
756 			  sdkp->zone_blocks);
757 	else
758 		sd_printk(KERN_NOTICE, sdkp,
759 			  "%u zones of %u logical blocks\n",
760 			  sdkp->nr_zones,
761 			  sdkp->zone_blocks);
762 }
763