dm-table.c (c6380ecd8e9bee7aba3d9a5a94b58168244c4a61) -> dm-table.c (8b904b5b6b58b9a29dcf3f82d936d9e7fd69fda6)
 /*
  * Copyright (C) 2001 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm-core.h"

--- 1847 unchanged lines hidden ---

 	bool wc = false, fua = false;

 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
 	q->limits = *limits;

 	if (!dm_table_supports_discards(t)) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_hw_discard_sectors = 0;
 		q->limits.discard_granularity = 0;
 		q->limits.discard_alignment = 0;
 		q->limits.discard_misaligned = 0;
 	} else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);

 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
 			fua = true;
 	}
 	blk_queue_write_cache(q, wc, fua);

 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);

 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);

 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;

 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);

 	dm_table_verify_integrity(t);

 	/*
 	 * Determine whether or not this queue's I/O timings contribute
 	 * to the entropy pool, Only request-based targets use this.
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }

 unsigned int dm_table_get_num_targets(struct dm_table *t)
 {
 	return t->num_targets;
 }

 struct list_head *dm_table_get_devices(struct dm_table *t)

--- 157 unchanged lines hidden ---
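Note: every change above is a one-for-one substitution of the old queue_flag_set_unlocked()/queue_flag_clear_unlocked() helpers with blk_queue_flag_set()/blk_queue_flag_clear(); the argument order (QUEUE_FLAG_* bit first, request_queue second) stays the same, and the new helpers manipulate q->queue_flags themselves. Below is a minimal sketch of the new helpers in an illustrative caller; the function name and its "nonrot" parameter are hypothetical and not part of dm-table.c, only the blk_queue_flag_*() calls reflect the API used in the new version.

#include <linux/blkdev.h>

/*
 * Illustrative only: shows the call pattern that the diff converts to.
 * blk_queue_flag_set()/blk_queue_flag_clear() take the flag bit first
 * and the request_queue second, mirroring the old *_unlocked() helpers.
 */
static void example_set_rotational_hint(struct request_queue *q, bool nonrot)
{
	if (nonrot)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	/* mark queue non-rotational */
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);	/* underlying device may be rotational */
}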