dm.c: 5e3cdecf7834a764b9d24f6e696adf3e03813fab (old) → e76239a3748c90a8b0e197f8f4544a8ce52f126e (new); lines removed in the new revision are prefixed with "-", lines added with "+".
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

--- 444 unchanged lines hidden ---

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

+static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones,
+			       gfp_t gfp_mask)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+	struct mapped_device *md = disk->private_data;
+	struct dm_target *tgt;
+	struct dm_table *map;
+	int srcu_idx, ret;
+
+	if (dm_suspended_md(md))
+		return -EAGAIN;
+
+	map = dm_get_live_table(md, &srcu_idx);
+	if (!map)
+		return -EIO;
+
+	tgt = dm_table_find_target(map, sector);
+	if (!dm_target_is_valid(tgt)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * If we are executing this, we already know that the block device
+	 * is a zoned device and so each target should have support for that
+	 * type of drive. A missing report_zones method means that the target
+	 * driver has a problem.
+	 */
+	if (WARN_ON(!tgt->type->report_zones)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * blkdev_report_zones() will loop and call this again to cover all the
+	 * zones of the target, eventually moving on to the next target.
+	 * So there is no need to loop here trying to fill the entire array
+	 * of zones.
+	 */
+	ret = tgt->type->report_zones(tgt, sector, zones,
+				      nr_zones, gfp_mask);
+
+out:
+	dm_put_live_table(md, srcu_idx);
+	return ret;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
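The WARN_ON() above encodes the contract introduced by this change: any target that can be part of a zoned table must supply a report_zones method in its target_type. The following is a wiring sketch only, not code from this patch: the "my-zoned" name and the my_ctr/my_dtr/my_map/my_report_zones callbacks are hypothetical placeholders, while .report_zones and the DM_TARGET_ZONED_HM feature flag are existing device-mapper hooks.

static struct target_type my_zoned_target = {
	.name		= "my-zoned",		/* hypothetical target name */
	.version	= {1, 0, 0},
	.features	= DM_TARGET_ZONED_HM,	/* target handles host-managed zones */
	.module		= THIS_MODULE,
	.ctr		= my_ctr,		/* placeholder constructor */
	.dtr		= my_dtr,		/* placeholder destructor */
	.map		= my_map,		/* placeholder bio mapper */
#ifdef CONFIG_BLK_DEV_ZONED
	.report_zones	= my_report_zones,	/* see the callback sketch below */
#endif
};
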
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

--- 681 unchanged lines hidden ---

	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

-/*
- * The zone descriptors obtained with a zone report indicate zone positions
- * within the target backing device, regardless of that device is a partition
- * and regardless of the target mapping start sector on the device or partition.
- * The zone descriptors start sector and write pointer position must be adjusted
- * to match their relative position within the dm device.
- * A target may call dm_remap_zone_report() after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
- * backing device.
- */
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
-{
-#ifdef CONFIG_BLK_DEV_ZONED
-	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
-	struct bio *report_bio = tio->io->orig_bio;
-	struct blk_zone_report_hdr *hdr = NULL;
-	struct blk_zone *zone;
-	unsigned int nr_rep = 0;
-	unsigned int ofst;
-	sector_t part_offset;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-	void *addr;
-
-	if (bio->bi_status)
-		return;
-
-	/*
-	 * bio sector was incremented by the request size on completion. Taking
-	 * into account the original request sector, the target start offset on
-	 * the backing device and the target mapping offset (ti->begin), the
-	 * start sector of the backing device. The partition offset is always 0
-	 * if the target uses a whole device.
-	 */
-	part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
-
-	/*
-	 * Remap the start sector of the reported zones. For sequential zones,
-	 * also remap the write pointer position.
-	 */
-	bio_for_each_segment(bvec, report_bio, iter) {
-		addr = kmap_atomic(bvec.bv_page);
-
-		/* Remember the report header in the first page */
-		if (!hdr) {
-			hdr = addr;
-			ofst = sizeof(struct blk_zone_report_hdr);
-		} else
-			ofst = 0;
-
-		/* Set zones start sector */
-		while (hdr->nr_zones && ofst < bvec.bv_len) {
-			zone = addr + ofst;
-			zone->start -= part_offset;
-			if (zone->start >= start + ti->len) {
-				hdr->nr_zones = 0;
-				break;
-			}
-			zone->start = zone->start + ti->begin - start;
-			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
-				if (zone->cond == BLK_ZONE_COND_FULL)
-					zone->wp = zone->start + zone->len;
-				else if (zone->cond == BLK_ZONE_COND_EMPTY)
-					zone->wp = zone->start;
-				else
-					zone->wp = zone->wp + ti->begin - start - part_offset;
-			}
-			ofst += sizeof(struct blk_zone);
-			hdr->nr_zones--;
-			nr_rep++;
-		}
-
-		if (addr != hdr)
-			kunmap_atomic(addr);
-
-		if (!hdr->nr_zones)
-			break;
-	}
-
-	if (hdr) {
-		hdr->nr_zones = nr_rep;
-		kunmap_atomic(hdr);
-	}
-
-	bio_advance(report_bio, report_bio->bi_iter.bi_size);
-
-#else /* !CONFIG_BLK_DEV_ZONED */
-	bio->bi_status = BLK_STS_NOTSUPP;
-#endif
-}
-EXPORT_SYMBOL_GPL(dm_remap_zone_report);
+/*
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the underlying device of the target. The zone
+ * descriptors must be remapped to match their position within the dm device.
+ * The caller target should obtain the zones information using
+ * blkdev_report_zones() to ensure that remapping for partition offset is
+ * already handled.
+ */
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+			  struct blk_zone *zones, unsigned int *nr_zones)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+	struct blk_zone *zone;
+	unsigned int nrz = *nr_zones;
+	int i;
+
+	/*
+	 * Remap the start sector and write pointer position of the zones in
+	 * the array. Since we may have obtained from the target underlying
+	 * device more zones that the target size, also adjust the number
+	 * of zones.
+	 */
+	for (i = 0; i < nrz; i++) {
+		zone = zones + i;
+		if (zone->start >= start + ti->len) {
+			memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
+			break;
+		}
+
+		zone->start = zone->start + ti->begin - start;
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			continue;
+
+		if (zone->cond == BLK_ZONE_COND_FULL)
+			zone->wp = zone->start + zone->len;
+		else if (zone->cond == BLK_ZONE_COND_EMPTY)
+			zone->wp = zone->start;
+		else
+			zone->wp = zone->wp + ti->begin - start;
+	}
+
+	*nr_zones = i;
+#else /* !CONFIG_BLK_DEV_ZONED */
+	*nr_zones = 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(dm_remap_zone_report);

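Per the comment above, a target's report_zones callback is expected to query its backing device with blkdev_report_zones(), which already accounts for any partition offset, and then call dm_remap_zone_report() to shift the result into dm-device coordinates. Below is a minimal sketch of such a callback in the spirit of the in-tree linear target; struct my_target_ctx, its dev member (a struct dm_dev, whose bdev and start fields are standard), and the my_map_sector() helper are illustrative assumptions, not part of this patch.

static int my_report_zones(struct dm_target *ti, sector_t sector,
			   struct blk_zone *zones, unsigned int *nr_zones,
			   gfp_t gfp_mask)
{
	struct my_target_ctx *ctx = ti->private;	/* hypothetical per-target state */
	int ret;

	/* Ask the backing device for zones, starting at the backing-device
	 * sector that corresponds to the requested dm-device sector. */
	ret = blkdev_report_zones(ctx->dev->bdev, my_map_sector(ti, sector),
				  zones, nr_zones, gfp_mask);
	if (ret)
		return ret;

	/* Shift zone start/write-pointer positions into dm-device
	 * coordinates and clip the report to the target's range. */
	if (*nr_zones)
		dm_remap_zone_report(ti, ctx->dev->start, zones, nr_zones);

	return 0;
}

The remapping arithmetic is just zone->start + ti->begin - start: for example, a target with ti->begin = 0 that maps to start = 524288 on the backing device reports a zone beginning at backing-device sector 786432 as beginning at sector 262144 of the dm device.
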
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;

--- 69 unchanged lines hidden ---

			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

-	if (bio_op(bio) != REQ_OP_ZONE_REPORT)
-		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (unlikely(bio_integrity(bio) != NULL))
		bio_integrity_trim(clone);

	return 0;
}

--- 196 unchanged lines hidden ---

	return true;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
-	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	if (unlikely(__process_abnormal_io(ci, ti, &r)))
		return r;

-	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
-		len = ci->sector_count;
-	else
-		len = min_t(sector_t, max_io_len(ci->sector, ti),
-			    ci->sector_count);
+	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

--- 42 unchanged lines hidden ---

			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
-				 * As this path is not used for REQ_OP_ZONE_REPORT,
-				 * the usage of io->orig_bio in dm_remap_zone_report()
-				 * won't be affected by this reassignment.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;
				bio_chain(b, bio);
				ret = generic_make_request(bio);
				break;
			}

--- 1532 unchanged lines hidden ---

	.pr_clear = dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
+	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,

--- 20 unchanged lines hidden ---