/openbmc/qemu/hw/block/

xen_blkif.h
     36  uint64_t nr_sectors; /* # of contiguous sectors to discard */   (member)
     62  uint64_t nr_sectors; /* # of contiguous sectors to discard */   (member)
    108  d->nr_sectors = s->nr_sectors;   (in blkif_get_x86_32_req())
    134  d->nr_sectors = s->nr_sectors;   (in blkif_get_x86_64_req())

/openbmc/linux/block/

blk-ia-ranges.c
     25  return sprintf(buf, "%llu\n", iar->nr_sectors);   (in blk_ia_range_nr_sectors_show())
    186  sector < iar->sector + iar->nr_sectors)   (in disk_find_ia_range())
    219  swap(iar->nr_sectors, tmp->nr_sectors);   (in disk_check_ia_ranges())
    222  sector += iar->nr_sectors;   (in disk_check_ia_ranges())
    247  new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)   (in disk_ia_ranges_changed())

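The disk_check_ia_ranges() hits above sort the ranges (the swap() call) and then walk them with sector += iar->nr_sectors to verify they tile the whole disk; a minimal userspace sketch of that contiguity check follows (struct and helper names are illustrative, not the kernel's).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ia_range {
        uint64_t sector;      /* first sector of the range */
        uint64_t nr_sectors;  /* number of sectors in the range */
    };

    /* Ranges must be contiguous, in order, and cover the whole capacity. */
    static bool ranges_cover_disk(const struct ia_range *r, int nr, uint64_t capacity)
    {
        uint64_t sector = 0;

        for (int i = 0; i < nr; i++) {
            if (r[i].sector != sector || !r[i].nr_sectors)
                return false;
            sector += r[i].nr_sectors;
        }
        return sector == capacity;
    }

    int main(void)
    {
        struct ia_range r[] = { { 0, 1024 }, { 1024, 1024 } };

        printf("%s\n", ranges_cover_disk(r, 2, 2048) ? "ok" : "bad");
        return 0;
    }
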
blk-zoned.c
    251  sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)   (argument in blkdev_zone_mgmt())
    256  sector_t end_sector = sector + nr_sectors;   (in blkdev_zone_mgmt())
    277  if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)   (in blkdev_zone_mgmt())
    286  if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {   (in blkdev_zone_mgmt())
    364  if (zrange->sector + zrange->nr_sectors <= zrange->sector ||   (in blkdev_truncate_zone_range())
    365  zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))   (in blkdev_truncate_zone_range())
    370  end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;   (in blkdev_truncate_zone_range())
    422  ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,   (in blkdev_zone_mgmt_ioctl())

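blkdev_zone_mgmt() above computes end_sector = sector + nr_sectors and rejects a length that is not a multiple of the zone size unless the range ends exactly at the device capacity, while blkdev_truncate_zone_range() adds the overflow and capacity checks. A rough userspace sketch of that validation, assuming power-of-two zone sizes and hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* All values are in 512-byte sectors; zone_sectors is a power of two. */
    static bool zone_range_ok(uint64_t sector, uint64_t nr_sectors,
                              uint64_t zone_sectors, uint64_t capacity)
    {
        uint64_t end = sector + nr_sectors;

        if (!nr_sectors || end < sector || end > capacity)
            return false;                       /* empty, overflowing or past EOD */
        if (sector & (zone_sectors - 1))
            return false;                       /* must start on a zone boundary */
        if ((nr_sectors & (zone_sectors - 1)) && end != capacity)
            return false;                       /* only the last zone may be short */
        return true;
    }

    int main(void)
    {
        printf("%d\n", zone_range_ok(0, 524288, 524288, 1000000));      /* 1: one full zone */
        printf("%d\n", zone_range_ok(524288, 475712, 524288, 1000000)); /* 1: short last zone */
        printf("%d\n", zone_range_ok(1, 524288, 524288, 1000000));      /* 0: misaligned start */
        return 0;
    }
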
blk-core.c
    537  unsigned int nr_sectors = bio_sectors(bio);   (local in bio_check_eod())
    539  if (nr_sectors &&   (in bio_check_eod())
    540  (nr_sectors > maxsector ||   (in bio_check_eod())
    541  bio->bi_iter.bi_sector > maxsector - nr_sectors)) {   (in bio_check_eod())
    545  bio->bi_iter.bi_sector, nr_sectors, maxsector);   (in bio_check_eod())
    576  int nr_sectors = bio_sectors(bio);   (local in blk_check_zone_append())
    592  if (nr_sectors > q->limits.chunk_sectors)   (in blk_check_zone_append())
    596  if (nr_sectors > q->limits.max_zone_append_sectors)   (in blk_check_zone_append())

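The bio_check_eod() hits show the overflow-safe end-of-device test, comparing against maxsector - nr_sectors instead of computing sector + nr_sectors; a minimal sketch of the same pattern with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Reject an I/O that would run past the end of the device.  Comparing
     * against "maxsector - nr_sectors" instead of "sector + nr_sectors"
     * avoids wrapping around on a 64-bit overflow.
     */
    static bool past_eod(uint64_t sector, uint32_t nr_sectors, uint64_t maxsector)
    {
        if (!nr_sectors)
            return false;           /* empty bios are always in range */
        return nr_sectors > maxsector || sector > maxsector - nr_sectors;
    }

    int main(void)
    {
        printf("%d\n", past_eod(90, 10, 100));        /* 0: ends exactly at capacity */
        printf("%d\n", past_eod(91, 10, 100));        /* 1: one sector too far */
        printf("%d\n", past_eod(UINT64_MAX, 1, 100)); /* 1: would overflow a naive add */
        return 0;
    }
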
/openbmc/linux/drivers/md/bcache/

writeback.h
     80  unsigned int nr_sectors)   (argument in bcache_dev_stripe_dirty())
     91  if (nr_sectors <= dc->disk.stripe_size)   (in bcache_dev_stripe_dirty())
     94  nr_sectors -= dc->disk.stripe_size;   (in bcache_dev_stripe_dirty())
    149  uint64_t offset, int nr_sectors);

writeback.c
    597  uint64_t offset, int nr_sectors)   (argument in bcache_dev_sectors_dirty_add())
    611  atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);   (in bcache_dev_sectors_dirty_add())
    615  while (nr_sectors) {   (in bcache_dev_sectors_dirty_add())
    616  int s = min_t(unsigned int, abs(nr_sectors),   (in bcache_dev_sectors_dirty_add())
    619  if (nr_sectors < 0)   (in bcache_dev_sectors_dirty_add())
    635  nr_sectors -= s;   (in bcache_dev_sectors_dirty_add())

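bcache_dev_sectors_dirty_add() above splits a possibly negative sector delta across fixed-size stripes (min_t() against the stripe remainder, sign restored, then nr_sectors -= s); a simplified sketch of that per-stripe accounting loop, with the stripe bookkeeping reduced to a plain array and all names illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define STRIPE_SIZE  8U          /* sectors per stripe, illustrative */
    #define NR_STRIPES   8

    static long dirty[NR_STRIPES];   /* dirty-sector count per stripe */

    /* Add (or, with a negative count, remove) dirty sectors starting at offset. */
    static void dev_sectors_dirty_add(uint64_t offset, int nr_sectors)
    {
        while (nr_sectors) {
            unsigned int stripe = offset / STRIPE_SIZE;
            unsigned int room = STRIPE_SIZE - (offset % STRIPE_SIZE);
            int s = abs(nr_sectors) < room ? abs(nr_sectors) : (int)room;

            if (nr_sectors < 0)
                s = -s;

            dirty[stripe] += s;
            nr_sectors -= s;
            offset += abs(s);
        }
    }

    int main(void)
    {
        dev_sectors_dirty_add(6, 10);    /* spans stripes 0 and 1 */
        dev_sectors_dirty_add(8, -3);    /* writeback cleaned part of stripe 1 */
        printf("%ld %ld\n", dirty[0], dirty[1]);  /* 2 5 */
        return 0;
    }
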
/openbmc/linux/block/partitions/

ibm.c
    201  sector_t nr_sectors,   (argument in find_lnx1_partitions())
    223  size = nr_sectors;   (in find_lnx1_partitions())
    297  sector_t nr_sectors;   (local in ibm_partition())
    312  nr_sectors = bdev_nr_sectors(bdev);   (in ibm_partition())
    313  if (nr_sectors == 0)   (in ibm_partition())
    340  label, labelsect, nr_sectors,   (in ibm_partition())
    357  size = nr_sectors;   (in ibm_partition())

/openbmc/linux/drivers/block/null_blk/

null_blk.h
    145  sector_t nr_sectors);
    147  sector_t sector, unsigned int nr_sectors);
    156  sector_t sector, sector_t nr_sectors);
    174  enum req_op op, sector_t sector, sector_t nr_sectors)   (argument in null_process_zoned_cmd())

zoned.c
    246  unsigned int nr_sectors = len >> SECTOR_SHIFT;   (local in null_zone_valid_read_len())
    250  sector + nr_sectors <= zone->wp)   (in null_zone_valid_read_len())
    375  unsigned int nr_sectors, bool append)   (argument in null_zone_write())
    387  return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);   (in null_zone_write())
    417  if (zone->wp + nr_sectors > zone->start + zone->capacity) {   (in null_zone_write())
    444  ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);   (in null_zone_write())
    448  zone->wp += nr_sectors;   (in null_zone_write())
    673  sector_t sector, sector_t nr_sectors)   (argument in null_process_zoned_cmd())
    681  return null_zone_write(cmd, sector, nr_sectors, false);   (in null_process_zoned_cmd())
    683  return null_zone_write(cmd, sector, nr_sectors, true);   (in null_process_zoned_cmd())
    [all …]

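null_zone_write() above rejects a write that would cross the zone's usable capacity and advances the write pointer once the data is in place; a stripped-down, single-zone sketch of that bookkeeping (types and names illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct zone {
        uint64_t start;     /* first sector of the zone */
        uint64_t capacity;  /* usable sectors in the zone */
        uint64_t wp;        /* write pointer (next writable sector) */
    };

    /* A zoned write must land exactly at the write pointer and stay in the zone. */
    static bool zone_write(struct zone *z, uint64_t sector, uint64_t nr_sectors)
    {
        if (sector != z->wp)
            return false;                               /* unaligned write */
        if (z->wp + nr_sectors > z->start + z->capacity)
            return false;                               /* would exceed zone capacity */
        z->wp += nr_sectors;                            /* data "written", advance wp */
        return true;
    }

    int main(void)
    {
        struct zone z = { .start = 0, .capacity = 128, .wp = 0 };

        printf("%d\n", zone_write(&z, 0, 64));    /* 1 */
        printf("%d\n", zone_write(&z, 64, 65));   /* 0: past zone capacity */
        printf("%d\n", zone_write(&z, 64, 64));   /* 1: fills the zone */
        return 0;
    }
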
main.c
   1219  sector_t sector, sector_t nr_sectors)   (argument in null_handle_discard())
   1222  size_t n = nr_sectors << SECTOR_SHIFT;   (in null_handle_discard())
   1382  sector_t nr_sectors)   (argument in null_handle_badblocks())
   1388  if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))   (in null_handle_badblocks())
   1397  sector_t nr_sectors)   (argument in null_handle_memory_backed())
   1403  return null_handle_discard(dev, sector, nr_sectors);   (in null_handle_memory_backed())
   1466  sector_t sector, unsigned int nr_sectors)   (argument in null_process_cmd())
   1472  ret = null_handle_badblocks(cmd, sector, nr_sectors);   (in null_process_cmd())
   1478  return null_handle_memory_backed(cmd, op, sector, nr_sectors);   (in null_process_cmd())
   1484  sector_t nr_sectors, enum req_op op)   (argument in null_handle_cmd())
    [all …]

/openbmc/linux/fs/btrfs/

scrub.c
    122  u16 nr_sectors;   (member)
    257  stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;   (in init_scrub_stripe())
    269  stripe->sectors = kcalloc(stripe->nr_sectors,   (in init_scrub_stripe())
    700  ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);   (in scrub_verify_one_sector())
    720  if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {   (in scrub_verify_one_sector())
    758  for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {   (in scrub_verify_one_stripe())
    769  for (i = 0; i < stripe->nr_sectors; i++) {   (in calc_sector_number())
    774  ASSERT(i < stripe->nr_sectors);   (in calc_sector_number())
    793  ASSERT(sector_nr < stripe->nr_sectors);   (in scrub_repair_read_endio())
    829  for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {   (in scrub_stripe_submit_repair_read())
    [all …]

raid56.c
    176  for (i = 0; i < rbio->nr_sectors; i++) {   (in cache_rbio_pages())
    247  for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {   (in index_stripe_sectors())
    882  ASSERT(index >= 0 && index < rbio->nr_sectors);   (in sector_in_rbio())
    949  rbio->nr_sectors = num_sectors;   (in alloc_rbio())
   1244  bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);   (in rmw_assemble_write_bios())
   1250  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;   (in rmw_assemble_write_bios())
   1285  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;   (in rmw_assemble_write_bios())
   1376  for (i = 0; i < rbio->nr_sectors; i++) {   (in find_stripe_sector())
   1416  for (i = 0; i < rbio->nr_sectors; i++) {   (in get_bio_sector_nr())
   1426  ASSERT(i < rbio->nr_sectors);   (in get_bio_sector_nr())
    [all …]

raid56.h
     62  u16 nr_sectors;   (member)

zoned.c
    367  sector_t nr_sectors;   (local in btrfs_get_dev_zone_info())
    424  nr_sectors = bdev_nr_sectors(bdev);   (in btrfs_get_dev_zone_info())
    426  zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);   (in btrfs_get_dev_zone_info())
    427  if (!IS_ALIGNED(nr_sectors, zone_sectors))   (in btrfs_get_dev_zone_info())
    484  while (sector < nr_sectors) {   (in btrfs_get_dev_zone_info())
    883  sector_t nr_sectors;   (local in btrfs_sb_log_location_bdev())
    897  nr_sectors = bdev_nr_sectors(bdev);   (in btrfs_sb_log_location_bdev())
    898  nr_zones = nr_sectors >> zone_sectors_shift;   (in btrfs_sb_log_location_bdev())
   1014  sector_t nr_sectors;   (local in btrfs_reset_sb_log_zones())
   1021  nr_sectors = bdev_nr_sectors(bdev);   (in btrfs_reset_sb_log_zones())
    [all …]

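btrfs_get_dev_zone_info() above derives the zone count by shifting the device size by ilog2(zone_sectors); the sketch below also counts a trailing partial zone when the capacity is not zone-aligned, which is an assumption about the truncated IS_ALIGNED() branch:

    #include <stdint.h>
    #include <stdio.h>

    /* zone_sectors is a power of two, as required for zoned block devices. */
    static uint32_t nr_zones_for(uint64_t nr_sectors, uint64_t zone_sectors)
    {
        uint32_t shift = 0;

        while ((1ULL << shift) < zone_sectors)
            shift++;                               /* ilog2(zone_sectors) */

        uint32_t zones = nr_sectors >> shift;
        if (nr_sectors & (zone_sectors - 1))
            zones++;                               /* assumed: extra partial zone at the end */
        return zones;
    }

    int main(void)
    {
        printf("%u\n", nr_zones_for(4096, 1024));  /* 4 full zones */
        printf("%u\n", nr_zones_for(4100, 1024));  /* 5: last zone is partial */
        return 0;
    }
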
/openbmc/linux/drivers/md/

dm-zone.c
    359  unsigned int nr_sectors;   (member)
    425  unsigned int nr_sectors)   (argument in dm_zone_map_bio_end())
    444  WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);   (in dm_zone_map_bio_end())
    451  if (nr_sectors != orig_bio_details->nr_sectors) {   (in dm_zone_map_bio_end())
    455  WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);   (in dm_zone_map_bio_end())
    534  orig_bio_details.nr_sectors = bio_sectors(clone);   (in dm_zone_map_bio())

dm-log-writes.c
     98  __le64 nr_sectors;   (member)
    127  sector_t nr_sectors;   (member)
    327  entry.nr_sectors = cpu_to_le64(block->nr_sectors);   (in log_one_block())
    451  lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);   (in log_writes_kthread())
    704  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));   (in log_writes_map())

md.h
    609  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)   (argument in md_sync_acct())
    611  atomic_add(nr_sectors, &bdev->bd_disk->sync_io);   (in md_sync_acct())
    614  static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)   (argument in md_sync_acct_bio())
    616  md_sync_acct(bio->bi_bdev, nr_sectors);   (in md_sync_acct_bio())

dm-zoned-target.c
    631  unsigned int nr_sectors = bio_sectors(bio);   (local in dmz_map())
    640  bio_op(bio), (unsigned long long)sector, nr_sectors,   (in dmz_map())
    645  if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)   (in dmz_map())
    649  if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))   (in dmz_map())
    659  if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {   (in dmz_map())
    669  if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))   (in dmz_map())

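dmz_map() above only accepts BIOs whose start sector and length are both aligned to the dm-zoned block size (the DMZ_BLOCK_SECTORS_MASK tests); a tiny sketch of that alignment check, with the 4 KiB block size taken as an assumption:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SECTORS       8U                    /* assumed: 4 KiB blocks / 512 B sectors */
    #define BLOCK_SECTORS_MASK  (BLOCK_SECTORS - 1)

    /* Both the start sector and the length must be whole blocks. */
    static bool bio_block_aligned(uint64_t sector, uint32_t nr_sectors)
    {
        return !(sector & BLOCK_SECTORS_MASK) && !(nr_sectors & BLOCK_SECTORS_MASK);
    }

    int main(void)
    {
        printf("%d\n", bio_block_aligned(16, 8));   /* 1 */
        printf("%d\n", bio_block_aligned(16, 12));  /* 0: length is 1.5 blocks */
        printf("%d\n", bio_block_aligned(4, 8));    /* 0: start is mid-block */
        return 0;
    }
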
/openbmc/linux/fs/zonefs/

trace.h
     31  __field(sector_t, nr_sectors)
     39  __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
     44  __entry->nr_sectors

/openbmc/qemu/migration/

block-dirty-bitmap.c
    425  uint64_t start_sector, uint32_t nr_sectors)   (argument in send_bitmap_bits())
    432  (uint64_t)nr_sectors << BDRV_SECTOR_BITS);   (in send_bitmap_bits())
    439  (uint64_t)nr_sectors << BDRV_SECTOR_BITS);   (in send_bitmap_bits())
    447  trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);   (in send_bitmap_bits())
    452  qemu_put_be32(f, nr_sectors);   (in send_bitmap_bits())
    694  uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,   (local in bulk_phase_send_chunk())
    697  send_bitmap_bits(f, s, dbms, dbms->cur_sector, nr_sectors);   (in bulk_phase_send_chunk())
    699  dbms->cur_sector += nr_sectors;   (in bulk_phase_send_chunk())

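bulk_phase_send_chunk() above walks the bitmap in fixed-size chunks, clamping the last one with MIN(total_sectors - cur_sector, ...) and advancing cur_sector by nr_sectors; a minimal sketch of that chunked iteration (chunk size and function names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_SECTORS  64U
    #define MIN(a, b)      ((a) < (b) ? (a) : (b))

    /* Pretend to send one chunk of dirty-bitmap data covering nr_sectors sectors. */
    static void send_bitmap_bits(uint64_t start_sector, uint32_t nr_sectors)
    {
        printf("chunk: sector %llu, %u sectors\n",
               (unsigned long long)start_sector, (unsigned)nr_sectors);
    }

    int main(void)
    {
        uint64_t total_sectors = 150, cur_sector = 0;

        while (cur_sector < total_sectors) {
            uint32_t nr_sectors = MIN(total_sectors - cur_sector, CHUNK_SECTORS);

            send_bitmap_bits(cur_sector, nr_sectors);
            cur_sector += nr_sectors;       /* last chunk is 22 sectors, not 64 */
        }
        return 0;
    }
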
/openbmc/u-boot/common/spl/

spl_fit.c
    181  int nr_sectors;   (local in spl_load_fit_image())
    221  nr_sectors = get_aligned_image_size(info, length, offset);   (in spl_load_fit_image())
    225  nr_sectors, (void *)load_ptr) != nr_sectors)   (in spl_load_fit_image())

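spl_load_fit_image() above converts the image length into a sector count via get_aligned_image_size() and checks that the block read returns exactly nr_sectors; the sketch below assumes that helper simply rounds the byte length (plus the sub-sector offset) up to whole sectors, which is an assumption about u-boot's code, not a copy of it:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE  512U   /* illustrative device block size */

    /*
     * Number of whole sectors that must be read to cover "length" bytes of
     * payload that starts "offset" bytes into the device (assumed behaviour).
     */
    static uint32_t aligned_image_sectors(uint32_t length, uint32_t offset)
    {
        uint32_t head = offset % SECTOR_SIZE;   /* unused bytes before the payload */

        return (length + head + SECTOR_SIZE - 1) / SECTOR_SIZE;
    }

    int main(void)
    {
        printf("%u\n", aligned_image_sectors(1024, 0));    /* 2 */
        printf("%u\n", aligned_image_sectors(1024, 100));  /* 3: payload straddles a boundary */
        return 0;
    }
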
/openbmc/linux/include/uapi/linux/

blkzoned.h
    145  __u64 nr_sectors;   (member)

/openbmc/linux/drivers/block/drbd/

drbd_actlog.c
    854  sector_t esector, nr_sectors;   (local in __drbd_change_sync())
    870  nr_sectors = get_capacity(device->vdisk);   (in __drbd_change_sync())
    873  if (!expect(device, sector < nr_sectors))   (in __drbd_change_sync())
    875  if (!expect(device, esector < nr_sectors))   (in __drbd_change_sync())
    876  esector = nr_sectors - 1;   (in __drbd_change_sync())
    878  lbnr = BM_SECT_TO_BIT(nr_sectors-1);   (in __drbd_change_sync())
    885  if (unlikely(esector == (nr_sectors-1)))   (in __drbd_change_sync())

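__drbd_change_sync() above clamps the request's end sector to the device capacity before converting sectors to bitmap bits; a small sketch of that clamp (the bitmap granularity and all names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BM_SECT_SHIFT  3U   /* illustrative: one bitmap bit per 8 sectors (4 KiB) */

    /* Clamp [sector, sector + size) to the device and report the covered bitmap bits. */
    static void change_sync(uint64_t sector, uint32_t size_sectors, uint64_t nr_sectors)
    {
        uint64_t esector = sector + size_sectors - 1;   /* last sector touched */

        if (sector >= nr_sectors)
            return;                                     /* request entirely past EOD */
        if (esector >= nr_sectors)
            esector = nr_sectors - 1;                   /* clamp to the last sector */

        printf("bits %llu..%llu\n",
               (unsigned long long)(sector >> BM_SECT_SHIFT),
               (unsigned long long)(esector >> BM_SECT_SHIFT));
    }

    int main(void)
    {
        change_sync(0, 32, 100);    /* bits 0..3 */
        change_sync(96, 32, 100);   /* clamped: bits 12..12 */
        return 0;
    }
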
/openbmc/linux/drivers/block/xen-blkback/

common.h
     93  uint64_t nr_sectors;   (member)
    147  uint64_t nr_sectors;   (member)

/openbmc/qemu/include/hw/xen/interface/io/

blkif.h
    659  uint64_t nr_sectors; /* number of contiguous sectors to discard*/   (member)