/openbmc/linux/block/ |
H A D | genhd.c |
    59  void set_capacity(struct gendisk *disk, sector_t sectors)
    61          bdev_set_nr_sectors(disk->part0, sectors);
    66   * Set disk capacity and notify if the size is not currently zero and will not
    69  bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
    71          sector_t capacity = get_capacity(disk);
    74          set_capacity(disk, size);
    82              !disk_live(disk) ||
    83              (disk->flags & GENHD_FL_HIDDEN))
    87                  disk->disk_name, capacity, size);
    95          kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
[all …]
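The two helpers above are the block core's exported capacity setters. Below is a minimal, hedged sketch of how a virtual block driver might use set_capacity_and_notify() in a resize path; the my_dev structure and my_dev_resize() helper are invented for illustration and are not from the source.

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Hypothetical driver-private state; only the gendisk matters here. */
struct my_dev {
        struct gendisk *gd;
};

/*
 * Called when the backing store has grown or shrunk to new_sectors
 * (512-byte units). set_capacity_and_notify() updates the capacity and,
 * when the disk is live and not hidden, emits a KOBJ_CHANGE uevent with
 * RESIZE=1 so userspace (udev, filesystems) can react.
 */
static void my_dev_resize(struct my_dev *dev, sector_t new_sectors)
{
        if (!set_capacity_and_notify(dev->gd, new_sectors))
                pr_debug("%s: capacity unchanged, no uevent sent\n",
                         dev->gd->disk_name);
}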
|
H A D | disk-events.c |
     3   * Disk events - monitor disk events like media change and eject request.
    12          struct gendisk *disk;           /* the associated disk */
    41  static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
    43          struct disk_events *ev = disk->ev;
    52          else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
    59   * disk_block_events - block and flush disk event checking
    60   * @disk: disk to block events for
    73  void disk_block_events(struct gendisk *disk)
    75          struct disk_events *ev = disk->ev;
    93          cancel_delayed_work_sync(&disk->ev->dwork);
[all …]
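disk_block_events() pairs with disk_unblock_events(); the calls nest, and each block must be balanced by exactly one unblock. A hedged sketch of the usual pattern, quiescing media-change/eject polling around an operation that would confuse an event check (the helper name and callback are invented for illustration):

#include <linux/blkdev.h>

/*
 * Hypothetical helper: stop event polling on @disk, run @op, then
 * re-enable polling. disk_block_events() also flushes event work that
 * is already in flight before returning.
 */
static void my_run_quiesced(struct gendisk *disk, void (*op)(struct gendisk *))
{
        disk_block_events(disk);
        op(disk);
        disk_unblock_events(disk);
}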
|
H A D | holder.c |
    12                                          struct gendisk *disk)
    16          list_for_each_entry(holder, &disk->slave_bdevs, list)
    33   * bd_link_disk_holder - create symlinks between holding disk and slave bdev
    35   * @disk: the holding disk
    41   * - from "slaves" directory of the holder @disk to the claimed @bdev
    42   * - from "holders" directory of the @bdev to the holder @disk
    44   * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
    51   * ensure that both @bdev and @disk are valid during the creation and
    60  int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
    65          if (WARN_ON_ONCE(!disk->slave_dir))
[all …]
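A hedged sketch of how a stacking driver (device-mapper is the main in-tree caller) might create and later remove the "slaves"/"holders" symlinks described above; the attach/detach helper names are hypothetical:

#include <linux/blkdev.h>

/* Hypothetical: @holder has just claimed @slave as a component device. */
static int my_attach_component(struct gendisk *holder, struct block_device *slave)
{
        /* Creates the holder's slaves/ link and the slave's holders/ link. */
        return bd_link_disk_holder(slave, holder);
}

static void my_detach_component(struct gendisk *holder, struct block_device *slave)
{
        bd_unlink_disk_holder(slave, holder);
}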
|
H A D | blk-zoned.c |
    60          if (!rq->q->disk->seq_zones_wlock)
    71          if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
    84                                          rq->q->disk->seq_zones_wlock)))
    95          if (rq->q->disk->seq_zones_wlock)
    97                                          rq->q->disk->seq_zones_wlock));
   141          struct gendisk *disk = bdev->bd_disk;
   142          sector_t capacity = get_capacity(disk);
   144          if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
   150          return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
   183          struct gendisk *disk = bdev->bd_disk;
[all …]
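blkdev_report_zones() validates the request and forwards it to the disk's ->report_zones() method, which invokes a caller-supplied callback once per zone. A hedged sketch that walks every zone of a zoned block device and counts the sequential-write-required ones; the counting callback is illustrative, not from the source:

#include <linux/blkdev.h>
#include <linux/blkzoned.h>

/* Called once per reported zone; a non-zero return stops the walk. */
static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
        unsigned int *nr_seq = data;

        if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
                (*nr_seq)++;
        return 0;
}

static int count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
{
        int ret;

        *nr_seq = 0;
        /* Start at sector 0 and ask for every zone on the device. */
        ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_seq_zone_cb, nr_seq);
        return ret < 0 ? ret : 0;
}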
|
H A D | blk-ia-ranges.c |
   104   * @disk:       Target disk
   106   * Register with sysfs a set of independent access ranges for @disk.
   108  int disk_register_independent_access_ranges(struct gendisk *disk)
   110          struct blk_independent_access_ranges *iars = disk->ia_ranges;
   111          struct request_queue *q = disk->queue;
   126                                     &disk->queue_kobj, "%s",
   129                  disk->ia_ranges = NULL;
   152  void disk_unregister_independent_access_ranges(struct gendisk *disk)
   154          struct request_queue *q = disk->queue;
   155          struct blk_independent_access_ranges *iars = disk->ia_ranges;
[all …]
|
H A D | blk-sysfs.c |
    82          if (!q->disk)
    84          ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
    94          if (!q->disk)
    99          q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
   254          if (q->disk)
   255                  q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
   324          return queue_var_show(disk_nr_zones(q->disk), page);
   329          return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
   334          return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
   586          ret = wbt_init(q->disk);
[all …]
|
/openbmc/linux/Documentation/admin-guide/ |
H A D | devices.txt |
    20     1 block      RAM disk
    21                    0 = /dev/ram0          First RAM disk
    22                    1 = /dev/ram1          Second RAM disk
    24                  250 = /dev/initrd        Initial RAM disk
    27                  /dev/initrd refers to a RAM disk which was preloaded
   115     3 block      First MFM, RLL and IDE hard disk/CD-ROM interface
   116                    0 = /dev/hda           Master: whole disk (or CD-ROM)
   117                   64 = /dev/hdb           Slave: whole disk (or CD-ROM)
   119                  For partitions, add to the whole disk device number:
   120                    0 = /dev/hd?           Whole disk
[all …]
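The "add to the whole disk device number" rule is plain minor-number arithmetic: /dev/hdb starts at minor 64 on major 3, so /dev/hdb2 is minor 66. A small userspace C illustration of that arithmetic (the partition chosen is just an example):

#include <stdio.h>
#include <sys/sysmacros.h>      /* makedev(), major(), minor() */

int main(void)
{
        /* Major 3: first IDE interface. The slave (hdb) starts at minor 64,
         * and partition N adds N to the whole-disk minor. */
        dev_t hdb2 = makedev(3, 64 + 2);

        printf("/dev/hdb2 -> major %u, minor %u\n", major(hdb2), minor(hdb2));
        return 0;
}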
|
/openbmc/linux/block/partitions/ |
H A D | core.c |
    17   *  Probe partition formats with tables at disk address 0
    32   *  disk address 0xdc0. Since these may also have stale
   129          state->disk = hd;
   296   * Must be called either with open_mutex held, before a disk can be opened or
   297   * after all disk users are gone.
   299  static struct block_device *add_partition(struct gendisk *disk, int partno,
   304          struct device *ddev = disk_to_dev(disk);
   310          lockdep_assert_held(&disk->open_mutex);
   319          switch (disk->queue->limits.zoned) {
   322                          disk->disk_name);
[all …]
|
/openbmc/qemu/docs/ |
H A D | block-replication.txt |
    21  executes till the next checkpoint. To support disk contents checkpoint,
    22  the modified disk contents in the Secondary VM must be buffered, and are
    24  effort during a vmstate checkpoint, the disk modification operations of
    25  the Primary disk are asynchronously forwarded to the Secondary node.
    38  |---------(1)----------+ | Disk Buffer |
    46  | Primary Disk |          | Secondary Disk |
    51      2) Before Primary write requests are written to Secondary disk, the
    52         original sector content will be read from Secondary disk and
    53         buffered in the Disk buffer, but it will not overwrite the existing
    55         previous COW of "Primary Write Requests") in the Disk buffer.
[all …]
|
/openbmc/qemu/tests/qemu-iotests/ |
H A D | 185.out |
    12  'arguments': { 'device': 'disk',
    20                   'qemu-io disk "write 0 4M"' } }
    25  'arguments': { 'device': 'disk',
    35  'arguments': { 'device': 'disk',
    39  …roseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
    40  …roseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
    44  …croseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "disk"}}
    45  …roseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
    46  …croseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "disk"}}
    47  …roseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
[all …]
|
H A D | 243.out |
     7  Disk usage: low
    13  Disk usage: low
    19  Disk usage: high
    25  Disk usage: high
    32  qcow2 disk usage: low
    33  data disk usage: low
    40  qcow2 disk usage: low
    41  data disk usage: low
    48  qcow2 disk usage: low
    49  data disk usage: high
[all …]
|
H A D | 298 |
    25  disk = os.path.join(iotests.test_dir, 'disk')
    28  drive_opts = f'node-name=disk,driver={iotests.imgfmt},' \
    30               f'file.file.node-name=file,file.file.filename={disk}'
    35          iotests.qemu_img_create('-f', iotests.imgfmt, disk, str(10 * MiB))
    40          check = iotests.qemu_img_check(disk)
    45          os.remove(disk)
    48          self.assertTrue(os.path.getsize(disk) > 100 * MiB)
    51          self.assertTrue(os.path.getsize(disk) < 10 * MiB)
    83          self.vm.cmd('blockdev-snapshot-sync', node_name='disk',
   100              'node-name': 'disk',
[all …]
|
/openbmc/linux/Documentation/ABI/testing/ |
H A D | sysfs-fs-f2fs |
     1  What:           /sys/fs/f2fs/<disk>/gc_max_sleep_time
     7  What:           /sys/fs/f2fs/<disk>/gc_min_sleep_time
    13  What:           /sys/fs/f2fs/<disk>/gc_no_gc_sleep_time
    19  What:           /sys/fs/f2fs/<disk>/gc_idle
    31  What:           /sys/fs/f2fs/<disk>/reclaim_segments
    41  What:           /sys/fs/f2fs/<disk>/main_blkaddr
    46  What:           /sys/fs/f2fs/<disk>/ipu_policy
    72  What:           /sys/fs/f2fs/<disk>/min_ipu_util
    78  What:           /sys/fs/f2fs/<disk>/min_fsync_blocks
    84  What:           /sys/fs/f2fs/<disk>/min_seq_blocks
[all …]
|
H A D | sysfs-block-bcache |
     1  What:           /sys/block/<disk>/bcache/unregister
    11  What:           /sys/block/<disk>/bcache/clear_stats
    17  What:           /sys/block/<disk>/bcache/cache
    24  What:           /sys/block/<disk>/bcache/cache_hits
    31  What:           /sys/block/<disk>/bcache/cache_misses
    37  What:           /sys/block/<disk>/bcache/cache_hit_ratio
    43  What:           /sys/block/<disk>/bcache/sequential_cutoff
    51  What:           /sys/block/<disk>/bcache/bypassed
    59  What:           /sys/block/<disk>/bcache/writeback
    68  What:           /sys/block/<disk>/bcache/writeback_running
[all …]
|
H A D | sysfs-fs-ext4 |
     1  What:           /sys/fs/ext4/<disk>/mb_stats
    10  What:           /sys/fs/ext4/<disk>/mb_group_prealloc
    18  What:           /sys/fs/ext4/<disk>/mb_max_to_scan
    25  What:           /sys/fs/ext4/<disk>/mb_min_to_scan
    32  What:           /sys/fs/ext4/<disk>/mb_order2_req
    40  What:           /sys/fs/ext4/<disk>/mb_stream_req
    51  What:           /sys/fs/ext4/<disk>/inode_readahead_blks
    59  What:           /sys/fs/ext4/<disk>/delayed_allocation_blocks
    67  What:           /sys/fs/ext4/<disk>/lifetime_write_kbytes
    75  What:           /sys/fs/ext4/<disk>/session_write_kbytes
[all …]
|
/openbmc/linux/Documentation/ABI/stable/ |
H A D | sysfs-block |
     1  What:           /sys/block/<disk>/alignment_offset
    10                  offset from the disk's natural alignment.
    13  What:           /sys/block/<disk>/discard_alignment
    25  What:           /sys/block/<disk>/diskseq
    29                  The /sys/block/<disk>/diskseq files reports the disk
    37  What:           /sys/block/<disk>/inflight
    49                  This is related to /sys/block/<disk>/queue/nr_requests
    53  What:           /sys/block/<disk>/integrity/device_is_integrity_capable
    61  What:           /sys/block/<disk>/integrity/format
    69  What:           /sys/block/<disk>/integrity/protection_interval_bytes
[all …]
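The inflight attribute reports two space-separated counters: read and write requests currently issued to the driver. A small userspace sketch that reads and parses it; the device name is only an example:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/inflight";      /* example disk */
        unsigned long reads, writes;
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%lu %lu", &reads, &writes) != 2) {
                fprintf(stderr, "unexpected format in %s\n", path);
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("in-flight: %lu reads, %lu writes\n", reads, writes);
        return 0;
}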
|
/openbmc/linux/Documentation/admin-guide/blockdev/ |
H A D | ramdisk.rst |
     2  Using the RAM disk block device with Linux
    10  4) An Example of Creating a Compressed RAM Disk
    16  The RAM disk driver is a way to use main system memory as a block device. It
    22  The RAM disk dynamically grows as more space is required. It does this by using
    26  The RAM disk supports up to 16 RAM disks by default, and can be reconfigured
    31  To use RAM disk support with your system, run './MAKEDEV ram' from the /dev
    35  The new RAM disk also has the ability to load compressed RAM disk images,
    37  rescue floppy disk.
    48  This parameter tells the RAM disk driver to set up RAM disks of N k size. The
    80  If you make a boot disk that has LILO, then for the above, you would use::
[all …]
|
/openbmc/qemu/docs/system/ |
H A D | images.rst |
     3  Disk Images
     6  QEMU supports many disk image formats, including growable disk images
     8  encrypted disk images.
    12  Quick start for disk image creation
    15  You can create a disk image with the command::
    19  where myimage.img is the disk image filename and mysize is its size in
    30  If you use the option ``-snapshot``, all disk images are considered as
    33  disk images by using the ``commit`` monitor command (or C-a s in the
    44  writable block device using the ``qcow2`` disk image format. Normally
    64  ``info snapshots``) and a snapshot of every writable disk image. The VM
[all …]
|
/openbmc/linux/drivers/md/ |
H A D | dm-zone.c |
    25          struct gendisk *disk = md->disk;
    46                 args.next_sector < get_capacity(disk));
    56  int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
    59          struct mapped_device *md = disk->private_data;
   143          if (md->disk) {
   144                  bitmap_free(md->disk->conv_zones_bitmap);
   145                  md->disk->conv_zones_bitmap = NULL;
   146                  bitmap_free(md->disk->seq_zones_wlock);
   147                  md->disk->seq_zones_wlock = NULL;
   181          struct gendisk *disk = md->disk;
[all …]
|
/openbmc/linux/Documentation/driver-api/md/ |
H A D | raid5-cache.rst |
     5  Raid 4/5/6 could include an extra disk for data cache besides normal RAID
     6  disks. The role of RAID disks isn't changed with the cache disk. The cache disk
    19  In both modes, all writes to the array will hit cache disk first. This means
    20  the cache disk must be fast and sustainable.
    34  The write-through cache will cache all data on cache disk first. After the data
    35  is safe on the cache disk, the data will be flushed onto RAID disks. The
    40  filesystems) after the data is safe on RAID disks, so cache disk failure
    41  doesn't cause data loss. Of course cache disk failure means the array is
    44  In write-through mode, the cache disk isn't required to be big. Several
    51  cached on cache disk. But the main goal of 'write-back' cache is to speed up
[all …]
|
/openbmc/linux/include/linux/ |
H A D | blkdev.h |
   212  static inline bool disk_live(struct gendisk *disk)
   214          return !inode_unhashed(disk->part0->bd_inode);
   218   * disk_openers - returns how many openers are there for a disk
   219   * @disk: disk to check
   221   * This returns the number of openers for a disk. Note that this value is only
   222   * stable if disk->open_mutex is held.
   227  static inline unsigned int disk_openers(struct gendisk *disk)
   229          return atomic_read(&disk->part0->bd_openers);
   233   * disk_has_partscan - return %true if partition scanning is enabled on a disk
   234   * @disk: disk to check
[all …]
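As the kernel-doc above says, disk_openers() is only stable while disk->open_mutex is held. A hedged sketch of the kind of teardown guard a driver might build on it (the function name is invented):

#include <linux/blkdev.h>

/* Hypothetical: refuse to tear the device down while it is still open. */
static int my_try_remove(struct gendisk *disk)
{
        int ret = 0;

        mutex_lock(&disk->open_mutex);
        if (disk_openers(disk) > 0)
                ret = -EBUSY;
        mutex_unlock(&disk->open_mutex);

        return ret;
}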
|
/openbmc/openbmc/poky/meta/recipes-core/util-linux/util-linux/ |
H A D | libfdisk-cfdisk-and-sfdisk-sector-size-improvements.patch |
    54   disk-utils/fdisk.8.adoc   | 2 +-
    57  diff --git a/disk-utils/fdisk.8.adoc b/disk-utils/fdisk.8.adoc
    59  --- a/disk-utils/fdisk.8.adoc
    60  +++ b/disk-utils/fdisk.8.adoc
    61  @@ -40,7 +40,7 @@ Note that *partx*(8) provides a rich interface for scripts to print disk layouts
    65  -Specify the sector size of the disk. Valid values are 512, 1024, 2048, and 4096. (Recent kernels k…
    66  …disk. Valid values are 512, 1024, 2048, and 4096. The kernel is aware of the sector size for regul…
    69  …Don't erase the beginning of the first disk sector when creating a new disk label. This feature is…
    77   * add ability to work with disk images where libfdisk defaults to 512
    81   disk-utils/cfdisk.8.adoc  | 3 +++
[all …]
|
/openbmc/linux/drivers/block/ |
H A D | n64cart.c |
   117          struct gendisk *disk;
   134          disk = blk_alloc_disk(NUMA_NO_NODE);
   135          if (!disk)
   138          disk->first_minor = 0;
   139          disk->flags = GENHD_FL_NO_PART;
   140          disk->fops = &n64cart_fops;
   141          disk->private_data = &pdev->dev;
   142          strcpy(disk->disk_name, "n64cart");
   144          set_capacity(disk, size >> SECTOR_SHIFT);
   145          set_disk_ro(disk, 1);
[all …]
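The probe excerpt follows the common bio-based registration pattern: allocate a gendisk, fill in name, fops and capacity, then add it. A condensed, hedged sketch of that pattern (error handling trimmed, the fops and disk name are placeholders, and the exact allocation helpers vary between kernel versions):

#include <linux/blkdev.h>
#include <linux/module.h>

static const struct block_device_operations my_fops = {
        .owner = THIS_MODULE,
        /* .submit_bio = my_submit_bio, for a bio-based driver */
};

/* Hypothetical: register a read-only, unpartitioned disk of @size bytes. */
static int my_register_disk(struct device *dev, u64 size)
{
        struct gendisk *disk;
        int err;

        disk = blk_alloc_disk(NUMA_NO_NODE);
        if (!disk)
                return -ENOMEM;

        disk->first_minor = 0;
        disk->flags = GENHD_FL_NO_PART;         /* no partition scanning */
        disk->fops = &my_fops;
        disk->private_data = dev;
        strscpy(disk->disk_name, "mydisk", DISK_NAME_LEN);
        set_capacity(disk, size >> SECTOR_SHIFT);
        set_disk_ro(disk, true);

        err = add_disk(disk);
        if (err)
                put_disk(disk);
        return err;
}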
|
/openbmc/linux/Documentation/power/ |
H A D | swsusp.rst |
    11  If you touch anything on disk between suspend and resume...
    20  problems. If your disk driver does not support suspend... (IDE does),
    45          echo shutdown > /sys/power/disk; echo disk > /sys/power/state
    49          echo platform > /sys/power/disk; echo disk > /sys/power/state
    54          echo suspend > /sys/power/disk; echo disk > /sys/power/state
    57  support. For suspend and resume to work, make sure your disk drivers
    59  suspend/resume with modular disk drivers, see FAQ, but you probably
   123          echo 4 > /proc/acpi/sleep           # for suspend to disk
   128          echo 4b > /proc/acpi/sleep          # for suspend to disk via s4bios
   139  bringing machine down? Suspend to disk, rearrange power cables,
[all …]
|
/openbmc/linux/drivers/block/null_blk/ |
H A D | trace.h |
    24  static inline void __assign_disk_name(char *name, struct gendisk *disk)
    26          if (disk)
    27                  memcpy(name, disk->disk_name, DISK_NAME_LEN);
    38          __array(char, disk, DISK_NAME_LEN)
    47          __assign_disk_name(__entry->disk, cmd->rq->q->disk);
    50          __print_disk_name(__entry->disk),
    60          __array(char, disk, DISK_NAME_LEN)
    65          __assign_disk_name(__entry->disk, nullb->disk);
    68          __print_disk_name(__entry->disk), __entry->nr_zones)
|