--- bio.c (52b544bd386688177c41d53e748111c29d0ccc98)
+++ bio.c (ddcf35d397976421a4ec1d0d00fbcc027a8cb034)
 /*
  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,

--- 14 unchanged lines hidden (view full) ---

 #include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
+#include <linux/blk-cgroup.h>

 #include <trace/events/block.h>
 #include "blk.h"
+#include "blk-rq-qos.h"

 /*
  * Test patch to inline a certain number of bi_io_vec's inside the bio
  * itself, to shrink a bio data allocation from two mempool calls to one
  */
 #define BIO_INLINE_VECS 4

 /*

--- 1679 unchanged lines hidden (view full) ---

                spin_unlock_irqrestore(&bio_dirty_lock, flags);
                schedule_work(&bio_dirty_work);
        } else {
                bio_put(bio);
        }
 }
 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);

-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
                            unsigned long sectors, struct hd_struct *part)
 {
+       const int sgrp = op_stat_group(op);
        int cpu = part_stat_lock();

        part_round_stats(q, cpu, part);
-       part_stat_inc(cpu, part, ios[rw]);
-       part_stat_add(cpu, part, sectors[rw], sectors);
-       part_inc_in_flight(q, part, rw);
+       part_stat_inc(cpu, part, ios[sgrp]);
+       part_stat_add(cpu, part, sectors[sgrp], sectors);
+       part_inc_in_flight(q, part, op_is_write(op));

        part_stat_unlock();
 }
 EXPORT_SYMBOL(generic_start_io_acct);

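The sgrp index comes from op_stat_group(), which is not part of this hunk. It maps a REQ_OP_* value onto a per-group statistics bucket instead of the old 0/1 read-write index; a rough sketch of that mapping, assuming the STAT_READ/STAT_WRITE values of enum stat_group (the real helper may also special-case discards):

/* Hedged sketch of the bucketing generic_start_io_acct() now relies on;
 * not the exact in-tree definition of op_stat_group(). */
static inline int op_stat_group_sketch(unsigned int op)
{
        /* write-like operations land in the STAT_WRITE bucket,
         * everything else is counted under STAT_READ */
        return op_is_write(op) ? STAT_WRITE : STAT_READ;
}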
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int req_op,
                          struct hd_struct *part, unsigned long start_time)
 {
        unsigned long duration = jiffies - start_time;
+       const int sgrp = op_stat_group(req_op);
        int cpu = part_stat_lock();

-       part_stat_add(cpu, part, ticks[rw], duration);
+       part_stat_add(cpu, part, ticks[sgrp], duration);
        part_round_stats(q, cpu, part);
-       part_dec_in_flight(q, part, rw);
+       part_dec_in_flight(q, part, op_is_write(req_op));

        part_stat_unlock();
 }
 EXPORT_SYMBOL(generic_end_io_acct);

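With both accounting helpers now taking the request operation, bio-based drivers pass bio_op(bio) rather than a 0/1 read-write index. A hedged sketch of such a caller; struct mydev, its fields and the function name are hypothetical, while generic_start_io_acct(), generic_end_io_acct(), bio_op() and bio_sectors() are the interfaces shown above:

/* Illustrative bio-based ->make_request path accounting one bio. */
static blk_qc_t mydev_make_request(struct request_queue *q, struct bio *bio)
{
        struct mydev *md = q->queuedata;        /* hypothetical driver data */
        unsigned long start = jiffies;

        generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
                              &md->disk->part0);

        /* ... carry out the I/O described by @bio ... */

        generic_end_io_acct(q, bio_op(bio), &md->disk->part0, start);
        bio_endio(bio);
        return BLK_QC_T_NONE;
}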
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 void bio_flush_dcache_pages(struct bio *bi)
 {

--- 42 unchanged lines hidden (view full) ---

 void bio_endio(struct bio *bio)
 {
 again:
        if (!bio_remaining_done(bio))
                return;
        if (!bio_integrity_endio(bio))
                return;

+       if (bio->bi_disk)
+               rq_qos_done_bio(bio->bi_disk->queue, bio);
+
        /*
         * Need to have a real endio function for chained bios, otherwise
         * various corner cases will break (like stacking block devices that
         * save/restore bi_end_io) - however, we want to avoid unbounded
         * recursion and blowing the stack. Tail call optimization would
         * handle this, but compiling with frame pointers also disables
         * gcc's sibling call optimization.
         */
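The rq_qos_done_bio() call added in the hunk above lets bio-based QoS policies such as blk-iolatency observe completions that never had a struct request behind them. Roughly, it walks the queue's rq_qos chain and invokes each policy's done_bio hook; a hedged sketch of that walk, not a quote of the blk-rq-qos.h helper:

/* Hedged sketch of the completion fan-out behind rq_qos_done_bio(). */
static inline void rq_qos_done_bio_sketch(struct request_queue *q,
                                          struct bio *bio)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->done_bio)
                        rqos->ops->done_bio(rqos, bio);
        }
}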

--- 191 unchanged lines hidden (view full) ---

        flags |= BIOSET_NEED_RESCUER;

        return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
 }
 EXPORT_SYMBOL(bioset_init_from_src);

 #ifdef CONFIG_BLK_CGROUP

+#ifdef CONFIG_MEMCG
+/**
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * @bio: target bio
+ * @page: the page to lookup the blkcg from
+ *
+ * Associate @bio with the blkcg from @page's owning memcg. This works like
+ * every other associate function wrt references.
+ */
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+{
+       struct cgroup_subsys_state *blkcg_css;
+
+       if (unlikely(bio->bi_css))
+               return -EBUSY;
+       if (!page->mem_cgroup)
+               return 0;
+       blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+                                    &io_cgrp_subsys);
+       bio->bi_css = blkcg_css;
+       return 0;
+}
+#endif /* CONFIG_MEMCG */
+
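A hedged sketch of the intended caller pattern for the new bio_associate_blkcg_from_page(): code writing a page out on behalf of its owner (swap-out is the motivating case) tags the bio before submission, so the I/O is charged to the page's memcg-derived blkcg rather than to the submitting thread. The helper below is hypothetical; bio_associate_blkcg_from_page(), the bi_opf flags and submit_bio() are existing interfaces:

/* Illustrative: charge a writeback bio to the page's owning cgroup. */
static void submit_owned_page(struct page *page, struct bio *bio)
{
        /* silently does nothing if the page has no mem_cgroup */
        bio_associate_blkcg_from_page(bio, page);
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        submit_bio(bio);
}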
 /**
  * bio_associate_blkcg - associate a bio with the specified blkcg
  * @bio: target bio
  * @blkcg_css: css of the blkcg to associate
  *
  * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
  * treat @bio as if it were issued by a task which belongs to the blkcg.
  *
  * This function takes an extra reference of @blkcg_css which will be put

--- 6 unchanged lines hidden (view full) ---

                return -EBUSY;
        css_get(blkcg_css);
        bio->bi_css = blkcg_css;
        return 0;
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkcg);

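By contrast, bio_associate_blkcg() above is for callers that already hold a css pointer, for example one cached on a writeback context; it takes its own css reference, which bio_disassociate_task() drops later. A minimal hedged sketch with a hypothetical wrapper name:

/* Illustrative: tag a bio with an already-known blkcg css. */
static void tag_bio_with_blkcg(struct bio *bio,
                               struct cgroup_subsys_state *blkcg_css)
{
        if (blkcg_css)
                bio_associate_blkcg(bio, blkcg_css);    /* grabs a css ref */
}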
+/**
+ * bio_associate_blkg - associate a bio with the specified blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * Associate @bio with the blkg specified by @blkg. This is the queue specific
+ * blkcg information associated with the @bio, a reference will be taken on the
+ * @blkg and will be freed when the bio is freed.
+ */
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+       if (unlikely(bio->bi_blkg))
+               return -EBUSY;
+       blkg_get(blkg);
+       bio->bi_blkg = blkg;
+       return 0;
+}
+
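The new bio_associate_blkg() pins the queue-specific blkg rather than a bare blkcg css, which is what per-queue policies key off. A hedged sketch of a lookup-and-attach path; the wrapper and its error handling are assumptions, while blkg_lookup() and bio_associate_blkg() are the real interfaces:

/* Illustrative: resolve the (blkcg, queue) pair and pin it on the bio. */
static void attach_blkg(struct bio *bio, struct blkcg *blkcg,
                        struct request_queue *q)
{
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkg = blkg_lookup(blkcg, q);           /* NULL if not instantiated yet */
        if (blkg)
                bio_associate_blkg(bio, blkg);  /* takes a blkg reference */
        rcu_read_unlock();
}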
 /**
  * bio_disassociate_task - undo bio_associate_current()
  * @bio: target bio
  */
 void bio_disassociate_task(struct bio *bio)
 {
        if (bio->bi_ioc) {
                put_io_context(bio->bi_ioc);
                bio->bi_ioc = NULL;
        }
        if (bio->bi_css) {
                css_put(bio->bi_css);
                bio->bi_css = NULL;
        }
+       if (bio->bi_blkg) {
+               blkg_put(bio->bi_blkg);
+               bio->bi_blkg = NULL;
+       }
 }

 /**
  * bio_clone_blkcg_association - clone blkcg association from src to dst bio
  * @dst: destination bio
  * @src: source bio
  */
 void bio_clone_blkcg_association(struct bio *dst, struct bio *src)

--- 47 unchanged lines hidden ---