// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

#define BLKG_DESTROY_BATCH_SIZE  64

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

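/*
 * Caller-side sketch (illustrative only, not code from this file): because
 * blkcg_css() may return a dying css, a caller that needs to hold on to the
 * css beyond the RCU read section would pair it with css_tryget_online():
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (css_tryget_online(css)) {
 *		... css is pinned and known alive here ...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */
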
static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	int i;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->q)
		blk_put_queue(blkg->q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and the request queue's release handler may
	 * sleep, so free the blkg asynchronously from a work item.
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

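/*
 * Illustrative RCU-side access (sketch, not code from this file): per the
 * comment above, an RCU read lock only makes group-local fields such as
 * stats and rate limits safe to read:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... read group-local stats / limits only ...
 *	rcu_read_unlock();
 */
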
static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated.  Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

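/*
 * Illustrative use (sketch, not code from this file): a caller that has not
 * guaranteed prior association must handle the %NULL return:
 *
 *	struct cgroup_subsys_state *css = bio_blkcg_css(bio);
 *
 *	if (!css)
 *		return;		... bio not associated with a blkcg ...
 */
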
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto err_free;

	if (!blk_get_queue(q))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		goto found;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting the lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * To avoid holding the spinlock for too long, release it
		 * after a batch of blkgs has been destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

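/*
 * Illustrative prfill callback (sketch; the "foo" names are hypothetical,
 * not a real policy) showing how __blkg_prfill_u64() plugs into
 * blkcg_print_blkgs() above:
 *
 *	static u64 foo_prfill(struct seq_file *sf,
 *			      struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, pd_to_foo(pd)->limit);
 *	}
 *
 *	blkcg_print_blkgs(sf, blkcg, foo_prfill, &blkcg_policy_foo, 0, false);
 */
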
/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from @input and get and return the matching bdev.  *@inputp is
 * updated to point past the device node prefix.  Returns an ERR_PTR()
 * value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct block_device *blkcg_conf_open_bdev(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return ERR_PTR(-ENODEV);
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return bdev;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	q = bdev_get_queue(bdev);

	/*
	 * blkcg_deactivate_policy() requires the queue to be frozen; grab
	 * q_usage_counter to prevent racing with blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			blkg_free(new_blkg);
			goto fail_preloaded;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->bdev = bdev;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail_exit_queue:
	blk_queue_exit(q);
fail:
	blkdev_put_no_open(bdev);
	/*
	 * If the queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but the queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after a per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
	rcu_read_unlock();
	blkdev_put_no_open(ctx->bdev);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

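/*
 * Illustrative config-write path (sketch; the "foo" names and nbytes are
 * hypothetical) showing the required blkg_conf_prep() / blkg_conf_finish()
 * pairing:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... parse ctx.body and update ctx.blkg here; the RCU read lock
 *	... and the queue lock are held at this point
 *	blkg_conf_finish(&ctx);
 *	return nbytes;
 */
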
static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(css->cgroup))
		return;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
		struct blkg_iostat cur;
		unsigned int seq;

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent)
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
	}

	rcu_read_unlock();
}

Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 898 | /* |
Johannes Weiner | dc26532 | 2021-04-29 22:56:23 -0700 | [diff] [blame] | 899 | * We source root cgroup stats from the system-wide stats to avoid |
| 900 | * tracking the same information twice and incurring overhead when no |
| 901 | * cgroups are defined. For that reason, cgroup_rstat_flush in |
| 902 | * blkcg_print_stat does not actually fill out the iostat in the root |
| 903 | * cgroup's blkcg_gq. |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 904 | * |
| 905 | * However, we would like to re-use the printing code between the root and |
| 906 | * non-root cgroups to the extent possible. For that reason, we simulate |
| 907 | * flushing the root cgroup's stats by explicitly filling in the iostat |
| 908 | * with disk level statistics. |
| 909 | */ |
| 910 | static void blkcg_fill_root_iostats(void) |
| 911 | { |
| 912 | struct class_dev_iter iter; |
| 913 | struct device *dev; |
| 914 | |
| 915 | class_dev_iter_init(&iter, &block_class, NULL, &disk_type); |
| 916 | while ((dev = class_dev_iter_next(&iter))) { |
Christoph Hellwig | 0d02129 | 2020-11-27 16:43:51 +0100 | [diff] [blame] | 917 | struct block_device *bdev = dev_to_bdev(dev); |
| 918 | struct blkcg_gq *blkg = |
Pavel Begunkov | ed6cdde | 2021-10-14 15:03:30 +0100 | [diff] [blame] | 919 | blk_queue_root_blkg(bdev_get_queue(bdev)); |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 920 | struct blkg_iostat tmp; |
| 921 | int cpu; |
Chengming Zhou | f122d10 | 2022-02-13 16:59:02 +0800 | [diff] [blame] | 922 | unsigned long flags; |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 923 | |
| 924 | memset(&tmp, 0, sizeof(tmp)); |
| 925 | for_each_possible_cpu(cpu) { |
| 926 | struct disk_stats *cpu_dkstats; |
| 927 | |
Christoph Hellwig | 0d02129 | 2020-11-27 16:43:51 +0100 | [diff] [blame] | 928 | cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu); |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 929 | tmp.ios[BLKG_IOSTAT_READ] += |
| 930 | cpu_dkstats->ios[STAT_READ]; |
| 931 | tmp.ios[BLKG_IOSTAT_WRITE] += |
| 932 | cpu_dkstats->ios[STAT_WRITE]; |
| 933 | tmp.ios[BLKG_IOSTAT_DISCARD] += |
| 934 | cpu_dkstats->ios[STAT_DISCARD]; |
| 935 | /* convert sectors to bytes */ |
| 936 | tmp.bytes[BLKG_IOSTAT_READ] += |
| 937 | cpu_dkstats->sectors[STAT_READ] << 9; |
| 938 | tmp.bytes[BLKG_IOSTAT_WRITE] += |
| 939 | cpu_dkstats->sectors[STAT_WRITE] << 9; |
| 940 | tmp.bytes[BLKG_IOSTAT_DISCARD] += |
| 941 | cpu_dkstats->sectors[STAT_DISCARD] << 9; |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 942 | } |
Chengming Zhou | f122d10 | 2022-02-13 16:59:02 +0800 | [diff] [blame] | 943 | |
| 944 | flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); |
| 945 | blkg_iostat_set(&blkg->iostat.cur, &tmp); |
| 946 | u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 947 | } |
| 948 | } |
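/*
 * Editorial note: disk_stats sector counts are in 512-byte units
 * regardless of the device's logical block size, hence the "<< 9" above
 * (e.g. 8 recorded sectors == 8 << 9 == 4096 bytes).
 */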
| 949 | |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 950 | static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) |
| 951 | { |
| 952 | struct blkg_iostat_set *bis = &blkg->iostat; |
| 953 | u64 rbytes, wbytes, rios, wios, dbytes, dios; |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 954 | const char *dname; |
| 955 | unsigned seq; |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 956 | int i; |
| 957 | |
| 958 | if (!blkg->online) |
| 959 | return; |
| 960 | |
| 961 | dname = blkg_dev_name(blkg); |
| 962 | if (!dname) |
| 963 | return; |
| 964 | |
Christoph Hellwig | 252c651 | 2021-08-10 17:26:23 +0200 | [diff] [blame] | 965 | seq_printf(s, "%s ", dname); |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 966 | |
| 967 | do { |
| 968 | seq = u64_stats_fetch_begin(&bis->sync); |
| 969 | |
| 970 | rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; |
| 971 | wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; |
| 972 | dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; |
| 973 | rios = bis->cur.ios[BLKG_IOSTAT_READ]; |
| 974 | wios = bis->cur.ios[BLKG_IOSTAT_WRITE]; |
| 975 | dios = bis->cur.ios[BLKG_IOSTAT_DISCARD]; |
| 976 | } while (u64_stats_fetch_retry(&bis->sync, seq)); |
| 977 | |
| 978 | if (rbytes || wbytes || rios || wios) { |
Christoph Hellwig | 252c651 | 2021-08-10 17:26:23 +0200 | [diff] [blame] | 979 | seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu", |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 980 | rbytes, wbytes, rios, wios, |
| 981 | dbytes, dios); |
| 982 | } |
| 983 | |
| 984 | if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { |
Christoph Hellwig | 252c651 | 2021-08-10 17:26:23 +0200 | [diff] [blame] | 985 | seq_printf(s, " use_delay=%d delay_nsec=%llu", |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 986 | atomic_read(&blkg->use_delay), |
| 987 | atomic64_read(&blkg->delay_nsec)); |
| 988 | } |
| 989 | |
| 990 | for (i = 0; i < BLKCG_MAX_POLS; i++) { |
| 991 | struct blkcg_policy *pol = blkcg_policy[i]; |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 992 | |
| 993 | if (!blkg->pd[i] || !pol->pd_stat_fn) |
| 994 | continue; |
| 995 | |
Wolfgang Bumiller | 3607849 | 2022-01-11 09:31:59 +0100 | [diff] [blame] | 996 | pol->pd_stat_fn(blkg->pd[i], s); |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 997 | } |
| 998 | |
Wolfgang Bumiller | 3607849 | 2022-01-11 09:31:59 +0100 | [diff] [blame] | 999 | seq_puts(s, "\n"); |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 1000 | } |
| 1001 | |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1002 | static int blkcg_print_stat(struct seq_file *sf, void *v) |
| 1003 | { |
| 1004 | struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); |
| 1005 | struct blkcg_gq *blkg; |
| 1006 | |
Boris Burkov | ef45fe4 | 2020-06-01 13:12:05 -0700 | [diff] [blame] | 1007 | if (!seq_css(sf)->parent) |
| 1008 | blkcg_fill_root_iostats(); |
| 1009 | else |
| 1010 | cgroup_rstat_flush(blkcg->css.cgroup); |
| 1011 | |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1012 | rcu_read_lock(); |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1013 | hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { |
Tejun Heo | b081436 | 2019-11-05 08:09:51 -0800 | [diff] [blame] | 1014 | spin_lock_irq(&blkg->q->queue_lock); |
Christoph Hellwig | 49cb516 | 2021-08-10 17:26:22 +0200 | [diff] [blame] | 1015 | blkcg_print_one_stat(blkg, sf); |
Tejun Heo | b081436 | 2019-11-05 08:09:51 -0800 | [diff] [blame] | 1016 | spin_unlock_irq(&blkg->q->queue_lock); |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1017 | } |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1018 | rcu_read_unlock(); |
| 1019 | return 0; |
| 1020 | } |
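/*
 * Editorial note: the two functions above produce the io.stat format, one
 * line per online blkg.  Illustrative output (values invented):
 *
 *	8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 *	8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021
 *
 * " use_delay=... delay_nsec=..." and per-policy fields are appended when
 * blkcg_debug_stats is set or a policy provides pd_stat_fn.
 */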
| 1021 | |
Bart Van Assche | e1f3b94 | 2016-06-14 17:04:32 +0200 | [diff] [blame] | 1022 | static struct cftype blkcg_files[] = { |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1023 | { |
| 1024 | .name = "stat", |
| 1025 | .seq_show = blkcg_print_stat, |
| 1026 | }, |
| 1027 | { } /* terminate */ |
| 1028 | }; |
| 1029 | |
Bart Van Assche | e1f3b94 | 2016-06-14 17:04:32 +0200 | [diff] [blame] | 1030 | static struct cftype blkcg_legacy_files[] = { |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1031 | { |
Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1032 | .name = "reset_stats", |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1033 | .write_u64 = blkcg_reset_stats, |
Vivek Goyal | 2208419 | 2009-12-03 12:59:49 -0500 | [diff] [blame] | 1034 | }, |
Tejun Heo | 4baf6e3 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1035 | { } /* terminate */ |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1036 | }; |
| 1037 | |
Christoph Hellwig | dec223c | 2022-04-20 06:27:15 +0200 | [diff] [blame] | 1038 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 1039 | struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css) |
| 1040 | { |
| 1041 | return &css_to_blkcg(css)->cgwb_list; |
| 1042 | } |
| 1043 | #endif |
| 1044 | |
Dennis Zhou (Facebook) | 59b5771 | 2018-08-31 16:22:43 -0400 | [diff] [blame] | 1045 | /* |
| 1046 | * blkcg destruction is a three-stage process. |
| 1047 | * |
| 1048 | * 1. Destruction starts. The blkcg_css_offline() callback is invoked |
| 1049 | * which offlines writeback. Here we tie the next stage of blkg destruction |
| 1050 | * to the completion of writeback associated with the blkcg. This lets us |
| 1051 | * avoid punting potentially large amounts of outstanding writeback to root |
| 1052 | * while maintaining any ongoing policies. The next stage is triggered when |
| 1053 | * the online_pin count goes to zero. |
| 1054 | * |
| 1055 | * 2. When the online_pin count goes to zero, blkcg_destroy_blkgs() is called |
| 1056 | * and handles the destruction of blkgs. Here the css reference held by |
| 1057 | * the blkg is put back eventually allowing blkcg_css_free() to be called. |
| 1058 | * This work may occur in cgwb_release_workfn() on the cgwb_release |
| 1059 | * workqueue. Any submitted ios that fail to get the blkg ref will be |
| 1060 | * punted to the root_blkg. |
| 1061 | * |
| 1062 | * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. |
| 1063 | * This finally frees the blkcg. |
| 1064 | */ |
| 1065 | |
Tejun Heo | 9f13ef6 | 2012-03-05 13:15:21 -0800 | [diff] [blame] | 1066 | /** |
Dennis Zhou (Facebook) | 59b5771 | 2018-08-31 16:22:43 -0400 | [diff] [blame] | 1067 | * blkcg_destroy_blkgs - responsible for shooting down blkgs |
| 1068 | * @blkcg: blkcg of interest |
| 1069 | * |
| 1070 | * blkgs should be removed while holding both q and blkcg locks. As blkcg lock |
| 1071 | * is nested inside q lock, this function performs reverse double lock dancing. |
| 1072 | * Destroying the blkgs releases the reference held on the blkcg's css allowing |
| 1073 | * blkcg_css_free to eventually be called. |
| 1074 | * |
| 1075 | * This is the blkcg counterpart of ioc_release_fn(). |
| 1076 | */ |
Christoph Hellwig | 397c9f4 | 2022-04-20 06:27:14 +0200 | [diff] [blame] | 1077 | static void blkcg_destroy_blkgs(struct blkcg *blkcg) |
Dennis Zhou (Facebook) | 59b5771 | 2018-08-31 16:22:43 -0400 | [diff] [blame] | 1078 | { |
Baolin Wang | 6c635ca | 2021-01-28 13:58:15 +0800 | [diff] [blame] | 1079 | might_sleep(); |
| 1080 | |
Tejun Heo | 9f13ef6 | 2012-03-05 13:15:21 -0800 | [diff] [blame] | 1081 | spin_lock_irq(&blkcg->lock); |
Tejun Heo | 7ee9c56 | 2012-03-05 13:15:11 -0800 | [diff] [blame] | 1082 | |
Dennis Zhou (Facebook) | 6b06546 | 2018-08-31 16:22:42 -0400 | [diff] [blame] | 1083 | while (!hlist_empty(&blkcg->blkg_list)) { |
| 1084 | struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, |
| 1085 | struct blkcg_gq, blkcg_node); |
Tejun Heo | c875f4d | 2012-03-05 13:15:22 -0800 | [diff] [blame] | 1086 | struct request_queue *q = blkg->q; |
Vivek Goyal | b1c3576 | 2009-12-03 12:59:47 -0500 | [diff] [blame] | 1087 | |
Baolin Wang | 6c635ca | 2021-01-28 13:58:15 +0800 | [diff] [blame] | 1088 | if (need_resched() || !spin_trylock(&q->queue_lock)) { |
| 1089 | /* |
| 1090 | * Given that the system can accumulate a huge number |
| 1091 | * of blkgs in pathological cases, check to see if we |
| 1092 | * need to reschedule to avoid softlockup. |
| 1093 | */ |
Tejun Heo | 9f13ef6 | 2012-03-05 13:15:21 -0800 | [diff] [blame] | 1094 | spin_unlock_irq(&blkcg->lock); |
Baolin Wang | 6c635ca | 2021-01-28 13:58:15 +0800 | [diff] [blame] | 1095 | cond_resched(); |
Dan Carpenter | a556793 | 2012-03-29 20:57:08 +0200 | [diff] [blame] | 1096 | spin_lock_irq(&blkcg->lock); |
Baolin Wang | 6c635ca | 2021-01-28 13:58:15 +0800 | [diff] [blame] | 1097 | continue; |
Jens Axboe | 0f3942a | 2010-05-03 14:28:55 +0200 | [diff] [blame] | 1098 | } |
Baolin Wang | 6c635ca | 2021-01-28 13:58:15 +0800 | [diff] [blame] | 1099 | |
| 1100 | blkg_destroy(blkg); |
| 1101 | spin_unlock(&q->queue_lock); |
Tejun Heo | 9f13ef6 | 2012-03-05 13:15:21 -0800 | [diff] [blame] | 1102 | } |
Jens Axboe | 0f3942a | 2010-05-03 14:28:55 +0200 | [diff] [blame] | 1103 | |
Tejun Heo | 9f13ef6 | 2012-03-05 13:15:21 -0800 | [diff] [blame] | 1104 | spin_unlock_irq(&blkcg->lock); |
Tejun Heo | 7ee9c56 | 2012-03-05 13:15:11 -0800 | [diff] [blame] | 1105 | } |
| 1106 | |
Christoph Hellwig | 397c9f4 | 2022-04-20 06:27:14 +0200 | [diff] [blame] | 1107 | /** |
| 1108 | * blkcg_pin_online - pin online state |
| 1109 | * @blkcg_css: blkcg of interest |
| 1110 | * |
| 1111 | * While pinned, a blkcg is kept online. This is primarily used to |
| 1112 | * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline |
| 1113 | * while an associated cgwb is still active. |
| 1114 | */ |
| 1115 | void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css) |
| 1116 | { |
| 1117 | refcount_inc(&css_to_blkcg(blkcg_css)->online_pin); |
| 1118 | } |
| 1119 | |
| 1120 | /** |
| 1121 | * blkcg_unpin_online - unpin online state |
| 1122 | * @blkcg_css: blkcg of interest |
| 1123 | * |
| 1124 | * This is primarily used to impedance-match blkg and cgwb lifetimes so |
| 1125 | * that blkg doesn't go offline while an associated cgwb is still active. |
| 1126 | * When this count goes to zero, all active cgwbs have finished so the |
| 1127 | * blkcg can continue destruction by calling blkcg_destroy_blkgs(). |
| 1128 | */ |
| 1129 | void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css) |
| 1130 | { |
| 1131 | struct blkcg *blkcg = css_to_blkcg(blkcg_css); |
| 1132 | |
| 1133 | do { |
| 1134 | if (!refcount_dec_and_test(&blkcg->online_pin)) |
| 1135 | break; |
| 1136 | blkcg_destroy_blkgs(blkcg); |
| 1137 | blkcg = blkcg_parent(blkcg); |
| 1138 | } while (blkcg); |
| 1139 | } |
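/*
 * Editorial note: the pin/unpin pair above is driven by the writeback
 * code; roughly (paraphrasing mm/backing-dev.c, not verbatim):
 *
 *	cgwb_create():		blkcg_pin_online(blkcg_css);
 *	cgwb_release_workfn():	blkcg_unpin_online(blkcg_css);
 *
 * Once an offlined blkcg's pin count hits zero, blkcg_unpin_online()
 * walks toward the root destroying blkgs; that is step 2 of the
 * destruction sequence described earlier.
 */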
| 1140 | |
| 1141 | /** |
| 1142 | * blkcg_css_offline - cgroup css_offline callback |
| 1143 | * @css: css of interest |
| 1144 | * |
| 1145 | * This function is called when @css is about to go away. Here the cgwbs are |
| 1146 | * offlined first and only once writeback associated with the blkcg has |
| 1147 | * finished do we start step 2 (see above). |
| 1148 | */ |
| 1149 | static void blkcg_css_offline(struct cgroup_subsys_state *css) |
| 1150 | { |
| 1151 | /* offline the cgwbs first; writeback completion triggers step 2 (see above) */ |
Christoph Hellwig | dec223c | 2022-04-20 06:27:15 +0200 | [diff] [blame] | 1152 | wb_blkcg_offline(css); |
Christoph Hellwig | 397c9f4 | 2022-04-20 06:27:14 +0200 | [diff] [blame] | 1153 | |
| 1154 | /* put the base online pin allowing step 2 to be triggered */ |
| 1155 | blkcg_unpin_online(css); |
| 1156 | } |
| 1157 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1158 | static void blkcg_css_free(struct cgroup_subsys_state *css) |
Tejun Heo | 7ee9c56 | 2012-03-05 13:15:11 -0800 | [diff] [blame] | 1159 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1160 | struct blkcg *blkcg = css_to_blkcg(css); |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 1161 | int i; |
Tejun Heo | 7ee9c56 | 2012-03-05 13:15:11 -0800 | [diff] [blame] | 1162 | |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1163 | mutex_lock(&blkcg_pol_mutex); |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1164 | |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1165 | list_del(&blkcg->all_blkcgs_node); |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1166 | |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 1167 | for (i = 0; i < BLKCG_MAX_POLS; i++) |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1168 | if (blkcg->cpd[i]) |
| 1169 | blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); |
| 1170 | |
| 1171 | mutex_unlock(&blkcg_pol_mutex); |
| 1172 | |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 1173 | kfree(blkcg); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1174 | } |
| 1175 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1176 | static struct cgroup_subsys_state * |
| 1177 | blkcg_css_alloc(struct cgroup_subsys_state *parent_css) |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1178 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1179 | struct blkcg *blkcg; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1180 | struct cgroup_subsys_state *ret; |
| 1181 | int i; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1182 | |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1183 | mutex_lock(&blkcg_pol_mutex); |
| 1184 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1185 | if (!parent_css) { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1186 | blkcg = &blkcg_root; |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 1187 | } else { |
| 1188 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
| 1189 | if (!blkcg) { |
| 1190 | ret = ERR_PTR(-ENOMEM); |
weiping zhang | 4c18c9e | 2017-08-25 23:49:32 +0800 | [diff] [blame] | 1191 | goto unlock; |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 1192 | } |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1193 | } |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1194 | |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1195 | for (i = 0; i < BLKCG_MAX_POLS ; i++) { |
| 1196 | struct blkcg_policy *pol = blkcg_policy[i]; |
| 1197 | struct blkcg_policy_data *cpd; |
| 1198 | |
| 1199 | /* |
| 1200 | * If the policy hasn't been registered yet, skip it here; its |
| 1201 | * cpd will be allocated at registration time. Otherwise, |
| 1202 | * check if the policy requires any specific per-cgroup |
| 1203 | * data: if it does, allocate and initialize it. |
| 1204 | */ |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1205 | if (!pol || !pol->cpd_alloc_fn) |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1206 | continue; |
| 1207 | |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1208 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1209 | if (!cpd) { |
| 1210 | ret = ERR_PTR(-ENOMEM); |
| 1211 | goto free_pd_blkcg; |
| 1212 | } |
Tejun Heo | 8143764 | 2015-08-18 14:55:15 -0700 | [diff] [blame] | 1213 | blkcg->cpd[i] = cpd; |
| 1214 | cpd->blkcg = blkcg; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1215 | cpd->plid = i; |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1216 | if (pol->cpd_init_fn) |
| 1217 | pol->cpd_init_fn(cpd); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1218 | } |
| 1219 | |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1220 | spin_lock_init(&blkcg->lock); |
Tejun Heo | d866dbf | 2019-07-24 10:37:22 -0700 | [diff] [blame] | 1221 | refcount_set(&blkcg->online_pin, 1); |
Tejun Heo | e00f4f4 | 2016-11-21 18:03:32 -0500 | [diff] [blame] | 1222 | INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1223 | INIT_HLIST_HEAD(&blkcg->blkg_list); |
Tejun Heo | 52ebea7 | 2015-05-22 17:13:37 -0400 | [diff] [blame] | 1224 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 1225 | INIT_LIST_HEAD(&blkcg->cgwb_list); |
| 1226 | #endif |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1227 | list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); |
| 1228 | |
| 1229 | mutex_unlock(&blkcg_pol_mutex); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1230 | return &blkcg->css; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1231 | |
| 1232 | free_pd_blkcg: |
| 1233 | for (i--; i >= 0; i--) |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1234 | if (blkcg->cpd[i]) |
| 1235 | blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); |
weiping zhang | 4c18c9e | 2017-08-25 23:49:32 +0800 | [diff] [blame] | 1236 | |
| 1237 | if (blkcg != &blkcg_root) |
| 1238 | kfree(blkcg); |
| 1239 | unlock: |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1240 | mutex_unlock(&blkcg_pol_mutex); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1241 | return ret; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1242 | } |
| 1243 | |
Tejun Heo | 4308a43 | 2019-07-24 10:37:55 -0700 | [diff] [blame] | 1244 | static int blkcg_css_online(struct cgroup_subsys_state *css) |
| 1245 | { |
Christoph Hellwig | 397c9f4 | 2022-04-20 06:27:14 +0200 | [diff] [blame] | 1246 | struct blkcg *parent = blkcg_parent(css_to_blkcg(css)); |
Tejun Heo | 4308a43 | 2019-07-24 10:37:55 -0700 | [diff] [blame] | 1247 | |
| 1248 | /* |
| 1249 | * blkcg_pin_online() is used to delay blkcg offline so that blkgs |
| 1250 | * don't go offline while cgwbs are still active on them. Pin the |
| 1251 | * parent so that offline always happens towards the root. |
| 1252 | */ |
| 1253 | if (parent) |
Christoph Hellwig | 397c9f4 | 2022-04-20 06:27:14 +0200 | [diff] [blame] | 1254 | blkcg_pin_online(css); |
Tejun Heo | 4308a43 | 2019-07-24 10:37:55 -0700 | [diff] [blame] | 1255 | return 0; |
| 1256 | } |
| 1257 | |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1258 | /** |
| 1259 | * blkcg_init_queue - initialize blkcg part of request queue |
| 1260 | * @q: request_queue to initialize |
| 1261 | * |
Christoph Hellwig | c62b37d | 2020-07-01 10:59:43 +0200 | [diff] [blame] | 1262 | * Called from blk_alloc_queue(). Responsible for initializing blkcg |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1263 | * part of new request_queue @q. |
| 1264 | * |
| 1265 | * RETURNS: |
| 1266 | * 0 on success, -errno on failure. |
| 1267 | */ |
| 1268 | int blkcg_init_queue(struct request_queue *q) |
| 1269 | { |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1270 | struct blkcg_gq *new_blkg, *blkg; |
| 1271 | bool preloaded; |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1272 | int ret; |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1273 | |
Ming Lei | 472e431 | 2022-02-11 18:11:48 +0800 | [diff] [blame] | 1274 | INIT_LIST_HEAD(&q->blkg_list); |
| 1275 | |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1276 | new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); |
| 1277 | if (!new_blkg) |
| 1278 | return -ENOMEM; |
| 1279 | |
| 1280 | preloaded = !radix_tree_preload(GFP_KERNEL); |
| 1281 | |
Jiang Biao | bea5488 | 2018-04-19 12:04:26 +0800 | [diff] [blame] | 1282 | /* Make sure the root blkg exists. */ |
Fanjun Kong | 77c570a | 2022-05-17 01:39:30 +0800 | [diff] [blame] | 1283 | /* spin_lock_irq() can serve as an RCU read-side critical section. */ |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1284 | spin_lock_irq(&q->queue_lock); |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1285 | blkg = blkg_create(&blkcg_root, q, new_blkg); |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1286 | if (IS_ERR(blkg)) |
| 1287 | goto err_unlock; |
| 1288 | q->root_blkg = blkg; |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1289 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1290 | |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1291 | if (preloaded) |
| 1292 | radix_tree_preload_end(); |
| 1293 | |
Bart Van Assche | 556910e | 2021-06-17 17:44:44 -0700 | [diff] [blame] | 1294 | ret = blk_ioprio_init(q); |
| 1295 | if (ret) |
| 1296 | goto err_destroy_all; |
| 1297 | |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1298 | ret = blk_throtl_init(q); |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1299 | if (ret) |
| 1300 | goto err_destroy_all; |
Yufen Yu | 27029b4 | 2020-08-10 22:21:16 -0400 | [diff] [blame] | 1301 | |
Yanfei Xu | 6f5ddde | 2021-09-15 15:24:26 +0800 | [diff] [blame] | 1302 | ret = blk_iolatency_init(q); |
| 1303 | if (ret) { |
| 1304 | blk_throtl_exit(q); |
Jan Kara | 82b74ca | 2022-06-23 09:48:32 +0200 | [diff] [blame] | 1305 | blk_ioprio_exit(q); |
Yanfei Xu | 6f5ddde | 2021-09-15 15:24:26 +0800 | [diff] [blame] | 1306 | goto err_destroy_all; |
| 1307 | } |
| 1308 | |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1309 | return 0; |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1310 | |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1311 | err_destroy_all: |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1312 | blkg_destroy_all(q); |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1313 | return ret; |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1314 | err_unlock: |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1315 | spin_unlock_irq(&q->queue_lock); |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1316 | if (preloaded) |
| 1317 | radix_tree_preload_end(); |
| 1318 | return PTR_ERR(blkg); |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1319 | } |
| 1320 | |
| 1321 | /** |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1322 | * blkcg_exit_queue - exit and release blkcg part of request_queue |
| 1323 | * @q: request_queue being released |
| 1324 | * |
Marcos Paulo de Souza | 7585d50 | 2019-01-25 00:01:42 -0200 | [diff] [blame] | 1325 | * Called from blk_exit_queue(). Responsible for exiting blkcg part. |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1326 | */ |
| 1327 | void blkcg_exit_queue(struct request_queue *q) |
| 1328 | { |
Tejun Heo | 3c96cb3 | 2012-04-13 13:11:34 -0700 | [diff] [blame] | 1329 | blkg_destroy_all(q); |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1330 | blk_throtl_exit(q); |
| 1331 | } |
| 1332 | |
Tejun Heo | 69d7fde | 2015-08-18 14:55:36 -0700 | [diff] [blame] | 1333 | static void blkcg_bind(struct cgroup_subsys_state *root_css) |
| 1334 | { |
| 1335 | int i; |
| 1336 | |
| 1337 | mutex_lock(&blkcg_pol_mutex); |
| 1338 | |
| 1339 | for (i = 0; i < BLKCG_MAX_POLS; i++) { |
| 1340 | struct blkcg_policy *pol = blkcg_policy[i]; |
| 1341 | struct blkcg *blkcg; |
| 1342 | |
| 1343 | if (!pol || !pol->cpd_bind_fn) |
| 1344 | continue; |
| 1345 | |
| 1346 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) |
| 1347 | if (blkcg->cpd[pol->plid]) |
| 1348 | pol->cpd_bind_fn(blkcg->cpd[pol->plid]); |
| 1349 | } |
| 1350 | mutex_unlock(&blkcg_pol_mutex); |
| 1351 | } |
| 1352 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1353 | static void blkcg_exit(struct task_struct *tsk) |
| 1354 | { |
| 1355 | if (tsk->throttle_queue) |
| 1356 | blk_put_queue(tsk->throttle_queue); |
| 1357 | tsk->throttle_queue = NULL; |
| 1358 | } |
| 1359 | |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1360 | struct cgroup_subsys io_cgrp_subsys = { |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1361 | .css_alloc = blkcg_css_alloc, |
Tejun Heo | 4308a43 | 2019-07-24 10:37:55 -0700 | [diff] [blame] | 1362 | .css_online = blkcg_css_online, |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1363 | .css_offline = blkcg_css_offline, |
| 1364 | .css_free = blkcg_css_free, |
Tejun Heo | f733164 | 2019-11-07 11:18:03 -0800 | [diff] [blame] | 1365 | .css_rstat_flush = blkcg_rstat_flush, |
Tejun Heo | 69d7fde | 2015-08-18 14:55:36 -0700 | [diff] [blame] | 1366 | .bind = blkcg_bind, |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1367 | .dfl_cftypes = blkcg_files, |
Tejun Heo | 880f50e2 | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1368 | .legacy_cftypes = blkcg_legacy_files, |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1369 | .legacy_name = "blkio", |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1370 | .exit = blkcg_exit, |
Tejun Heo | 1ced953b | 2014-07-08 18:02:57 -0400 | [diff] [blame] | 1371 | #ifdef CONFIG_MEMCG |
| 1372 | /* |
| 1373 | * This ensures that, if available, memcg is automatically enabled |
| 1374 | * together on the default hierarchy so that the owner cgroup can |
| 1375 | * be retrieved from writeback pages. |
| 1376 | */ |
| 1377 | .depends_on = 1 << memory_cgrp_id, |
| 1378 | #endif |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1379 | }; |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1380 | EXPORT_SYMBOL_GPL(io_cgrp_subsys); |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1381 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1382 | /** |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1383 | * blkcg_activate_policy - activate a blkcg policy on a request_queue |
| 1384 | * @q: request_queue of interest |
| 1385 | * @pol: blkcg policy to activate |
| 1386 | * |
| 1387 | * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through |
| 1388 | * bypass mode to populate its blkgs with policy_data for @pol. |
| 1389 | * |
| 1390 | * Activation happens with @q bypassed, so nobody would be accessing blkgs |
| 1391 | * from IO path. Update of each blkg is protected by both queue and blkcg |
| 1392 | * locks so that holding either lock and testing blkcg_policy_enabled() is |
| 1393 | * always enough for dereferencing policy data. |
| 1394 | * |
| 1395 | * The caller is responsible for synchronizing [de]activations and policy |
| 1396 | * [un]registerations. Returns 0 on success, -errno on failure. |
| 1397 | */ |
| 1398 | int blkcg_activate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1399 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1400 | { |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1401 | struct blkg_policy_data *pd_prealloc = NULL; |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1402 | struct blkcg_gq *blkg, *pinned_blkg = NULL; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1403 | int ret; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1404 | |
| 1405 | if (blkcg_policy_enabled(q, pol)) |
| 1406 | return 0; |
| 1407 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1408 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1409 | blk_mq_freeze_queue(q); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1410 | retry: |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1411 | spin_lock_irq(&q->queue_lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1412 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1413 | /* blkg_list is pushed at the head, reverse walk to allocate parents first */ |
Tejun Heo | 71c8140 | 2019-06-13 15:30:40 -0700 | [diff] [blame] | 1414 | list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1415 | struct blkg_policy_data *pd; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1416 | |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1417 | if (blkg->pd[pol->plid]) |
| 1418 | continue; |
| 1419 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1420 | /* If prealloc matches, use it; otherwise try GFP_NOWAIT */ |
| 1421 | if (blkg == pinned_blkg) { |
| 1422 | pd = pd_prealloc; |
| 1423 | pd_prealloc = NULL; |
| 1424 | } else { |
| 1425 | pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, |
| 1426 | blkg->blkcg); |
| 1427 | } |
| 1428 | |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1429 | if (!pd) { |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1430 | /* |
| 1431 | * GFP_NOWAIT failed. Free the existing one and |
| 1432 | * prealloc for @blkg w/ GFP_KERNEL. |
| 1433 | */ |
| 1434 | if (pinned_blkg) |
| 1435 | blkg_put(pinned_blkg); |
| 1436 | blkg_get(blkg); |
| 1437 | pinned_blkg = blkg; |
| 1438 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1439 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1440 | |
| 1441 | if (pd_prealloc) |
| 1442 | pol->pd_free_fn(pd_prealloc); |
| 1443 | pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, |
| 1444 | blkg->blkcg); |
| 1445 | if (pd_prealloc) |
| 1446 | goto retry; |
| 1447 | else |
| 1448 | goto enomem; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1449 | } |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1450 | |
| 1451 | blkg->pd[pol->plid] = pd; |
| 1452 | pd->blkg = blkg; |
Tejun Heo | b276a87 | 2013-01-09 08:05:12 -0800 | [diff] [blame] | 1453 | pd->plid = pol->plid; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1454 | } |
| 1455 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1456 | /* all allocated, init in the same order */ |
| 1457 | if (pol->pd_init_fn) |
| 1458 | list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) |
| 1459 | pol->pd_init_fn(blkg->pd[pol->plid]); |
| 1460 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1461 | __set_bit(pol->plid, q->blkcg_pols); |
| 1462 | ret = 0; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1463 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1464 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1465 | out: |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1466 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1467 | blk_mq_unfreeze_queue(q); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1468 | if (pinned_blkg) |
| 1469 | blkg_put(pinned_blkg); |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1470 | if (pd_prealloc) |
| 1471 | pol->pd_free_fn(pd_prealloc); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1472 | return ret; |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1473 | |
| 1474 | enomem: |
| 1475 | /* alloc failed, nothing's initialized yet, free everything */ |
| 1476 | spin_lock_irq(&q->queue_lock); |
| 1477 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
Li Jinlin | 858560b | 2021-09-14 12:26:05 +0800 | [diff] [blame] | 1478 | struct blkcg *blkcg = blkg->blkcg; |
| 1479 | |
| 1480 | spin_lock(&blkcg->lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1481 | if (blkg->pd[pol->plid]) { |
| 1482 | pol->pd_free_fn(blkg->pd[pol->plid]); |
| 1483 | blkg->pd[pol->plid] = NULL; |
| 1484 | } |
Li Jinlin | 858560b | 2021-09-14 12:26:05 +0800 | [diff] [blame] | 1485 | spin_unlock(&blkcg->lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1486 | } |
| 1487 | spin_unlock_irq(&q->queue_lock); |
| 1488 | ret = -ENOMEM; |
| 1489 | goto out; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1490 | } |
| 1491 | EXPORT_SYMBOL_GPL(blkcg_activate_policy); |
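/*
 * Editorial note: a policy typically activates itself from its per-queue
 * init; e.g. blk-throttle's blk_throtl_init() ends with (sketch, error
 * handling elided):
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
 *	if (ret)
 *		...tear down the per-queue state and fail...
 */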
| 1492 | |
| 1493 | /** |
| 1494 | * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue |
| 1495 | * @q: request_queue of interest |
| 1496 | * @pol: blkcg policy to deactivate |
| 1497 | * |
| 1498 | * Deactivate @pol on @q. Follows the same synchronization rules as |
| 1499 | * blkcg_activate_policy(). |
| 1500 | */ |
| 1501 | void blkcg_deactivate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1502 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1503 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1504 | struct blkcg_gq *blkg; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1505 | |
| 1506 | if (!blkcg_policy_enabled(q, pol)) |
| 1507 | return; |
| 1508 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1509 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1510 | blk_mq_freeze_queue(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1511 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1512 | spin_lock_irq(&q->queue_lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1513 | |
| 1514 | __clear_bit(pol->plid, q->blkcg_pols); |
| 1515 | |
| 1516 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
Li Jinlin | 858560b | 2021-09-14 12:26:05 +0800 | [diff] [blame] | 1517 | struct blkcg *blkcg = blkg->blkcg; |
| 1518 | |
| 1519 | spin_lock(&blkcg->lock); |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1520 | if (blkg->pd[pol->plid]) { |
Dennis Zhou (Facebook) | 6b06546 | 2018-08-31 16:22:42 -0400 | [diff] [blame] | 1521 | if (pol->pd_offline_fn) |
Tejun Heo | a9520cd | 2015-08-18 14:55:14 -0700 | [diff] [blame] | 1522 | pol->pd_offline_fn(blkg->pd[pol->plid]); |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1523 | pol->pd_free_fn(blkg->pd[pol->plid]); |
| 1524 | blkg->pd[pol->plid] = NULL; |
| 1525 | } |
Li Jinlin | 858560b | 2021-09-14 12:26:05 +0800 | [diff] [blame] | 1526 | spin_unlock(&blkcg->lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1527 | } |
| 1528 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1529 | spin_unlock_irq(&q->queue_lock); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1530 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1531 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1532 | blk_mq_unfreeze_queue(q); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1533 | } |
| 1534 | EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); |
| 1535 | |
Jason Yan | e55cf79 | 2022-06-29 15:09:17 +0800 | [diff] [blame] | 1536 | static void blkcg_free_all_cpd(struct blkcg_policy *pol) |
| 1537 | { |
| 1538 | struct blkcg *blkcg; |
| 1539 | |
| 1540 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1541 | if (blkcg->cpd[pol->plid]) { |
| 1542 | pol->cpd_free_fn(blkcg->cpd[pol->plid]); |
| 1543 | blkcg->cpd[pol->plid] = NULL; |
| 1544 | } |
| 1545 | } |
| 1546 | } |
| 1547 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1548 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1549 | * blkcg_policy_register - register a blkcg policy |
| 1550 | * @pol: blkcg policy to register |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1551 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1552 | * Register @pol with blkcg core. Might sleep and @pol may be modified on |
| 1553 | * successful registration. Returns 0 on success and -errno on failure. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1554 | */ |
Jens Axboe | d5bf029 | 2014-06-22 16:31:56 -0600 | [diff] [blame] | 1555 | int blkcg_policy_register(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1556 | { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1557 | struct blkcg *blkcg; |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1558 | int i, ret; |
Tejun Heo | e8989fa | 2012-03-05 13:15:20 -0800 | [diff] [blame] | 1559 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1560 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1561 | mutex_lock(&blkcg_pol_mutex); |
| 1562 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1563 | /* find an empty slot */ |
| 1564 | ret = -ENOSPC; |
| 1565 | for (i = 0; i < BLKCG_MAX_POLS; i++) |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1566 | if (!blkcg_policy[i]) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1567 | break; |
Jens Axboe | 01c5f85 | 2018-09-11 10:59:53 -0600 | [diff] [blame] | 1568 | if (i >= BLKCG_MAX_POLS) { |
| 1569 | pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n"); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1570 | goto err_unlock; |
Jens Axboe | 01c5f85 | 2018-09-11 10:59:53 -0600 | [diff] [blame] | 1571 | } |
Tejun Heo | 035d10b | 2012-03-05 13:15:04 -0800 | [diff] [blame] | 1572 | |
weiping zhang | e840107 | 2017-10-17 23:56:21 +0800 | [diff] [blame] | 1573 | /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */ |
| 1574 | if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || |
| 1575 | (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) |
| 1576 | goto err_unlock; |
| 1577 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1578 | /* register @pol */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1579 | pol->plid = i; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1580 | blkcg_policy[pol->plid] = pol; |
| 1581 | |
| 1582 | /* allocate and install cpd's */ |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1583 | if (pol->cpd_alloc_fn) { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1584 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1585 | struct blkcg_policy_data *cpd; |
| 1586 | |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1587 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
Bart Van Assche | bbb427e | 2016-09-29 08:33:30 -0700 | [diff] [blame] | 1588 | if (!cpd) |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1589 | goto err_free_cpds; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1590 | |
Tejun Heo | 8143764 | 2015-08-18 14:55:15 -0700 | [diff] [blame] | 1591 | blkcg->cpd[pol->plid] = cpd; |
| 1592 | cpd->blkcg = blkcg; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1593 | cpd->plid = pol->plid; |
Tejun Heo | 86a5bba | 2019-08-28 15:05:52 -0700 | [diff] [blame] | 1594 | if (pol->cpd_init_fn) |
| 1595 | pol->cpd_init_fn(cpd); |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1596 | } |
| 1597 | } |
| 1598 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1599 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1600 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1601 | /* everything is in place, add intf files for the new policy */ |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1602 | if (pol->dfl_cftypes) |
| 1603 | WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys, |
| 1604 | pol->dfl_cftypes)); |
Tejun Heo | 880f50e2 | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1605 | if (pol->legacy_cftypes) |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1606 | WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys, |
Tejun Heo | 880f50e2 | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1607 | pol->legacy_cftypes)); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1608 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1609 | return 0; |
| 1610 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1611 | err_free_cpds: |
Jason Yan | e55cf79 | 2022-06-29 15:09:17 +0800 | [diff] [blame] | 1612 | if (pol->cpd_free_fn) |
| 1613 | blkcg_free_all_cpd(pol); |
| 1614 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1615 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1616 | err_unlock: |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1617 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1618 | mutex_unlock(&blkcg_pol_register_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1619 | return ret; |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1620 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1621 | EXPORT_SYMBOL_GPL(blkcg_policy_register); |
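/*
 * Editorial note: a minimal, hypothetical registration, showing the
 * paired alloc/free callbacks that the sanity check above requires
 * ("mypol" and its callbacks are illustrative names):
 *
 *	static struct blkcg_policy blkcg_policy_mypol = {
 *		.dfl_cftypes	= mypol_files,
 *		.pd_alloc_fn	= mypol_pd_alloc,
 *		.pd_init_fn	= mypol_pd_init,
 *		.pd_free_fn	= mypol_pd_free,
 *	};
 *
 *	static int __init mypol_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_mypol);
 *	}
 *	module_init(mypol_init);
 */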
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1622 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1623 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1624 | * blkcg_policy_unregister - unregister a blkcg policy |
| 1625 | * @pol: blkcg policy to unregister |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1626 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1627 | * Undo blkcg_policy_register(@pol). Might sleep. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1628 | */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1629 | void blkcg_policy_unregister(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1630 | { |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1631 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1632 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1633 | if (WARN_ON(blkcg_policy[pol->plid] != pol)) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1634 | goto out_unlock; |
| 1635 | |
| 1636 | /* kill the intf files first */ |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1637 | if (pol->dfl_cftypes) |
| 1638 | cgroup_rm_cftypes(pol->dfl_cftypes); |
Tejun Heo | 880f50e2 | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1639 | if (pol->legacy_cftypes) |
| 1640 | cgroup_rm_cftypes(pol->legacy_cftypes); |
Tejun Heo | 44ea53d | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1641 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1642 | /* remove cpds and unregister */ |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1643 | mutex_lock(&blkcg_pol_mutex); |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1644 | |
Jason Yan | e55cf79 | 2022-06-29 15:09:17 +0800 | [diff] [blame] | 1645 | if (pol->cpd_free_fn) |
| 1646 | blkcg_free_all_cpd(pol); |
| 1647 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1648 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1649 | |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1650 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1651 | out_unlock: |
| 1652 | mutex_unlock(&blkcg_pol_register_mutex); |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1653 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1654 | EXPORT_SYMBOL_GPL(blkcg_policy_unregister); |
Josef Bacik | 903d23f | 2018-07-03 11:14:52 -0400 | [diff] [blame] | 1655 | |
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 1656 | bool __blkcg_punt_bio_submit(struct bio *bio) |
| 1657 | { |
| 1658 | struct blkcg_gq *blkg = bio->bi_blkg; |
| 1659 | |
| 1660 | /* consume the flag first */ |
| 1661 | bio->bi_opf &= ~REQ_CGROUP_PUNT; |
| 1662 | |
| 1663 | /* never bounce for the root cgroup */ |
| 1664 | if (!blkg->parent) |
| 1665 | return false; |
| 1666 | |
| 1667 | spin_lock_bh(&blkg->async_bio_lock); |
| 1668 | bio_list_add(&blkg->async_bios, bio); |
| 1669 | spin_unlock_bh(&blkg->async_bio_lock); |
| 1670 | |
| 1671 | queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); |
| 1672 | return true; |
| 1673 | } |
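/*
 * Editorial note: submitters opt in by setting REQ_CGROUP_PUNT on a bio
 * when the submitting context should not be blocked by this blkg's
 * throttling (btrfs sets it for async/compressed writeback issued from
 * shared helper threads).  Bios queued here are re-submitted later from
 * blkg->async_bio_work on blkcg_punt_bio_wq.
 */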
| 1674 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1675 | /* |
| 1676 | * Scale the accumulated delay based on how long it has been since we updated |
| 1677 | * the delay. We only call this when we are adding delay, in case it's been a |
| 1678 | * while since we added delay, and when we are checking to see if we need to |
| 1679 | * delay a task, to account for any delays that may have occurred. |
| 1680 | */ |
| 1681 | static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) |
| 1682 | { |
| 1683 | u64 old = atomic64_read(&blkg->delay_start); |
| 1684 | |
Tejun Heo | 54c52e1 | 2020-04-13 12:27:55 -0400 | [diff] [blame] | 1685 | /* negative use_delay means no scaling, see blkcg_set_delay() */ |
| 1686 | if (atomic_read(&blkg->use_delay) < 0) |
| 1687 | return; |
| 1688 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1689 | /* |
| 1690 | * We only want to scale down every second. The idea here is that we |
| 1691 | * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain |
| 1692 | * time window. We only want to throttle tasks for recent delay that |
| 1693 | * has occurred, in 1-second time windows, since that's the maximum time |
| 1694 | * things can be throttled for. We save the current delay window in |
| 1695 | * blkg->last_delay so we know what amount is still left to be charged |
| 1696 | * to the blkg from this point onward. blkg->last_use keeps track of |
| 1697 | * the use_delay counter. The idea is if we're unthrottling the blkg we |
| 1698 | * are ok with whatever is happening now, and we can take away more of |
| 1699 | * the accumulated delay as we've already throttled enough that |
| 1700 | * everybody is happy with their IO latencies. |
| 1701 | */ |
| 1702 | if (time_before64(old + NSEC_PER_SEC, now) && |
Uros Bizjak | 96388f5 | 2022-07-12 17:44:55 +0200 | [diff] [blame] | 1703 | atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) { |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1704 | u64 cur = atomic64_read(&blkg->delay_nsec); |
| 1705 | u64 sub = min_t(u64, blkg->last_delay, now - old); |
| 1706 | int cur_use = atomic_read(&blkg->use_delay); |
| 1707 | |
| 1708 | /* |
| 1709 | * We've been unthrottled, subtract a larger chunk of our |
| 1710 | * accumulated delay. |
| 1711 | */ |
| 1712 | if (cur_use < blkg->last_use) |
| 1713 | sub = max_t(u64, sub, blkg->last_delay >> 1); |
| 1714 | |
| 1715 | /* |
| 1716 | * This shouldn't happen, but handle it anyway. Our delay_nsec |
| 1717 | * should only ever be growing except here where we subtract out |
| 1718 | * min(last_delay, 1 second), but lord knows bugs happen and I'd |
| 1719 | * rather not end up with negative numbers. |
| 1720 | */ |
| 1721 | if (unlikely(cur < sub)) { |
| 1722 | atomic64_set(&blkg->delay_nsec, 0); |
| 1723 | blkg->last_delay = 0; |
| 1724 | } else { |
| 1725 | atomic64_sub(sub, &blkg->delay_nsec); |
| 1726 | blkg->last_delay = cur - sub; |
| 1727 | } |
| 1728 | blkg->last_use = cur_use; |
| 1729 | } |
| 1730 | } |
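/*
 * Editorial note: a worked example of the scaling above.  With
 * delay_nsec == 3 * NSEC_PER_SEC, last_delay == NSEC_PER_SEC and more
 * than a second since delay_start: sub = min(last_delay, now - old) ==
 * NSEC_PER_SEC (raised to at least last_delay / 2 if use_delay dropped,
 * i.e. we were unthrottled), leaving delay_nsec == 2 * NSEC_PER_SEC and
 * last_delay recording what remains chargeable in the new window.
 */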
| 1731 | |
| 1732 | /* |
| 1733 | * This is called when we want to actually walk up the hierarchy and check to |
| 1734 | * see if we need to throttle, and then actually throttle if there is some |
| 1735 | * accumulated delay. This should only be called upon return to user space so |
| 1736 | * we're not holding some lock that would induce a priority inversion. |
| 1737 | */ |
| 1738 | static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) |
| 1739 | { |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1740 | unsigned long pflags; |
Tejun Heo | 5160a5a | 2020-09-01 14:52:52 -0400 | [diff] [blame] | 1741 | bool clamp; |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1742 | u64 now = ktime_to_ns(ktime_get()); |
| 1743 | u64 exp; |
| 1744 | u64 delay_nsec = 0; |
| 1745 | int tok; |
| 1746 | |
| 1747 | while (blkg->parent) { |
Tejun Heo | 5160a5a | 2020-09-01 14:52:52 -0400 | [diff] [blame] | 1748 | int use_delay = atomic_read(&blkg->use_delay); |
| 1749 | |
| 1750 | if (use_delay) { |
| 1751 | u64 this_delay; |
| 1752 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1753 | blkcg_scale_delay(blkg, now); |
Tejun Heo | 5160a5a | 2020-09-01 14:52:52 -0400 | [diff] [blame] | 1754 | this_delay = atomic64_read(&blkg->delay_nsec); |
| 1755 | if (this_delay > delay_nsec) { |
| 1756 | delay_nsec = this_delay; |
| 1757 | clamp = use_delay > 0; |
| 1758 | } |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1759 | } |
| 1760 | blkg = blkg->parent; |
| 1761 | } |
| 1762 | |
| 1763 | if (!delay_nsec) |
| 1764 | return; |
| 1765 | |
| 1766 | /* |
| 1767 | * Let's not sleep for all eternity if we've amassed a huge delay. |
| 1768 | * Swapping or metadata IO can accumulate tens of seconds worth of |
| 1769 | * delay, and we want userspace to be able to do _something_, so cap the |
Tejun Heo | 5160a5a | 2020-09-01 14:52:52 -0400 | [diff] [blame] | 1770 | * delays at 0.25s. If there are tens of seconds worth of delay then the |
| 1771 | * tasks will be delayed for 0.25 seconds for every syscall. If |
| 1772 | * blkcg_set_delay() was used as indicated by negative use_delay, the |
| 1773 | * caller is responsible for regulating the range. |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1774 | */ |
Tejun Heo | 5160a5a | 2020-09-01 14:52:52 -0400 | [diff] [blame] | 1775 | if (clamp) |
| 1776 | delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1777 | |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1778 | if (use_memdelay) |
| 1779 | psi_memstall_enter(&pflags); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1780 | |
| 1781 | exp = ktime_add_ns(now, delay_nsec); |
| 1782 | tok = io_schedule_prepare(); |
| 1783 | do { |
| 1784 | __set_current_state(TASK_KILLABLE); |
| 1785 | if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS)) |
| 1786 | break; |
| 1787 | } while (!fatal_signal_pending(current)); |
| 1788 | io_schedule_finish(tok); |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1789 | |
| 1790 | if (use_memdelay) |
| 1791 | psi_memstall_leave(&pflags); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1792 | } |
| 1793 | |
| 1794 | /** |
| 1795 | * blkcg_maybe_throttle_current - throttle the current task if it has been marked |
| 1796 | * |
| 1797 | * This is only called if we've been marked with set_notify_resume(). Obviously |
| 1798 | * we can be set_notify_resume() for reasons other than blkcg throttling, so we |
| 1799 | * check to see if current->throttle_queue is set; if it isn't, this does |
| 1800 | * nothing. This should only ever be called by the resume code; it's not meant |
| 1801 | * to be called willy-nilly, as it will actually do the work to |
| 1802 | * throttle the task if it is set up for throttling. |
| 1803 | */ |
| 1804 | void blkcg_maybe_throttle_current(void) |
| 1805 | { |
| 1806 | struct request_queue *q = current->throttle_queue; |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1807 | struct blkcg *blkcg; |
| 1808 | struct blkcg_gq *blkg; |
| 1809 | bool use_memdelay = current->use_memdelay; |
| 1810 | |
| 1811 | if (!q) |
| 1812 | return; |
| 1813 | |
| 1814 | current->throttle_queue = NULL; |
| 1815 | current->use_memdelay = false; |
| 1816 | |
| 1817 | rcu_read_lock(); |
Christoph Hellwig | 8277825 | 2022-04-20 06:27:22 +0200 | [diff] [blame] | 1818 | blkcg = css_to_blkcg(blkcg_css()); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1819 | if (!blkcg) |
| 1820 | goto out; |
| 1821 | blkg = blkg_lookup(blkcg, q); |
| 1822 | if (!blkg) |
| 1823 | goto out; |
Dennis Zhou | 7754f66 | 2018-12-05 12:10:39 -0500 | [diff] [blame] | 1824 | if (!blkg_tryget(blkg)) |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1825 | goto out; |
| 1826 | rcu_read_unlock(); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1827 | |
| 1828 | blkcg_maybe_throttle_blkg(blkg, use_memdelay); |
| 1829 | blkg_put(blkg); |
Josef Bacik | cc7ecc25 | 2018-07-31 12:39:03 -0400 | [diff] [blame] | 1830 | blk_put_queue(q); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1831 | return; |
| 1832 | out: |
| 1833 | rcu_read_unlock(); |
| 1834 | blk_put_queue(q); |
| 1835 | } |
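
/*
 * Illustrative sketch (not part of the original file): the return-to-user
 * path is the only intended caller.  The hypothetical hook below mirrors
 * what the kernel's resume_user_mode_work() does, with signal and
 * task_work handling elided.
 */
static inline void blkcg_example_resume_hook(void)
{
	/* ... handle pending signals, task_work, etc. ... */

	/* No-op unless blkcg_schedule_throttle() armed current->throttle_queue. */
	blkcg_maybe_throttle_current();
}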
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1836 | |
| 1837 | /** |
| 1838 | * blkcg_schedule_throttle - this task needs to check for throttling |
Bart Van Assche | 537d71b | 2019-03-20 13:18:45 -0700 | [diff] [blame] | 1839 | * @q: the request queue IO was submitted on |
| 1840 | * @use_memdelay: do we charge this to memory delay for PSI |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1841 | * |
| 1842 | * This is called by the IO controller when we know there's delay accumulated |
| 1843 | * for the blkg for this task. We do not pass the blkg because there are places |
| 1844 | * we call this that may not have that information; the swapping code, for |
| 1845 | * instance, will only have a request_queue at that point. This sets the |
| 1846 | * notify_resume for the task to check whether it requires throttling before |
| 1847 | * returning to user space. |
| 1848 | * |
| 1849 | * We will only schedule once per syscall. You can call this over and over |
| 1850 | * again and it will only do the check once upon return to user space, and only |
| 1851 | * throttle once. If the task needs to be throttled again it'll need to be |
| 1852 | * re-set the next time we see the task. |
| 1853 | */ |
| 1854 | void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) |
| 1855 | { |
| 1856 | if (unlikely(current->flags & PF_KTHREAD)) |
| 1857 | return; |
| 1858 | |
Chunguang Xu | 49d1822 | 2021-01-25 13:05:28 +0800 | [diff] [blame] | 1859 | if (current->throttle_queue != q) { |
| 1860 | if (!blk_get_queue(q)) |
| 1861 | return; |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1862 | |
Chunguang Xu | 49d1822 | 2021-01-25 13:05:28 +0800 | [diff] [blame] | 1863 | if (current->throttle_queue) |
| 1864 | blk_put_queue(current->throttle_queue); |
| 1865 | current->throttle_queue = q; |
| 1866 | } |
| 1867 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1868 | if (use_memdelay) |
| 1869 | current->use_memdelay = use_memdelay; |
| 1870 | set_notify_resume(current); |
| 1871 | } |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1872 | |
| 1873 | /** |
| 1874 | * blkcg_add_delay - add delay to this blkg |
Bart Van Assche | 537d71b | 2019-03-20 13:18:45 -0700 | [diff] [blame] | 1875 | * @blkg: blkg of interest |
| 1876 | * @now: the current time in nanoseconds |
| 1877 | * @delta: how many nanoseconds of delay to add |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1878 | * |
| 1879 | * Charge @delta to the blkg's current delay accumulation. This is used to |
| 1880 | * throttle tasks if an IO controller thinks we need more throttling. |
| 1881 | */ |
| 1882 | void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) |
| 1883 | { |
Tejun Heo | 54c52e1 | 2020-04-13 12:27:55 -0400 | [diff] [blame] | 1884 | if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) |
| 1885 | return; |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1886 | blkcg_scale_delay(blkg, now); |
| 1887 | atomic64_add(delta, &blkg->delay_nsec); |
| 1888 | } |
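
/*
 * Illustrative sketch (not part of the original file): a policy that has
 * decided a blkg owes more delay typically pairs the two calls documented
 * above: charge the blkg, then arm the notify-resume hook so the
 * submitting task throttles on its way back to user space.  The helper
 * name is hypothetical.
 */
static inline void blkcg_example_charge_and_throttle(struct blkcg_gq *blkg,
						     u64 delta_nsec)
{
	u64 now = ktime_to_ns(ktime_get());

	/* Accumulate @delta_nsec of debt against @blkg. */
	blkcg_add_delay(blkg, now, delta_nsec);

	/* Throttle current on return to user space; no PSI memstall charge. */
	blkcg_schedule_throttle(blkg->q, false);
}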
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1889 | |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1890 | /** |
| 1891 | * blkg_tryget_closest - try to get a blkg ref on the closest blkg |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1892 | * @bio: target bio |
| 1893 | * @css: target css |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1894 | * |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1895 | * As the failure mode here is to walk up the blkg tree, this ensures that the |
| 1896 | * blkg->parent pointers are always valid. This returns the blkg that it ended |
| 1897 | * up taking a reference on or %NULL if no reference was taken. |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1898 | */ |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1899 | static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio, |
| 1900 | struct cgroup_subsys_state *css) |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1901 | { |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1902 | struct blkcg_gq *blkg, *ret_blkg = NULL; |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1903 | |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1904 | rcu_read_lock(); |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 1905 | blkg = blkg_lookup_create(css_to_blkcg(css), |
Pavel Begunkov | ed6cdde | 2021-10-14 15:03:30 +0100 | [diff] [blame] | 1906 | bdev_get_queue(bio->bi_bdev)); |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1907 | while (blkg) { |
| 1908 | if (blkg_tryget(blkg)) { |
| 1909 | ret_blkg = blkg; |
| 1910 | break; |
| 1911 | } |
| 1912 | blkg = blkg->parent; |
| 1913 | } |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1914 | rcu_read_unlock(); |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1915 | |
| 1916 | return ret_blkg; |
| 1917 | } |
| 1918 | |
| 1919 | /** |
| 1920 | * bio_associate_blkg_from_css - associate a bio with a specified css |
| 1921 | * @bio: target bio |
| 1922 | * @css: target css |
| 1923 | * |
| 1924 | * Associate @bio with the blkg found by combining the css's blkg and the |
| 1925 | * request_queue of the @bio. An association failure is handled by walking up |
| 1926 | * the blkg tree. Therefore, the blkg associated can be anything between the |
| 1927 | * blkg looked up for @css and q->root_blkg. This only happens when a cgroup |
| 1928 | * is dying; the remaining bios then spill to the closest alive blkg. |
| 1929 | * |
| 1930 | * A reference will be taken on the blkg and will be released when @bio is |
| 1931 | * freed. |
| 1932 | */ |
| 1933 | void bio_associate_blkg_from_css(struct bio *bio, |
| 1934 | struct cgroup_subsys_state *css) |
| 1935 | { |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1936 | if (bio->bi_blkg) |
| 1937 | blkg_put(bio->bi_blkg); |
| 1938 | |
Christoph Hellwig | a5b9752 | 2020-06-27 09:31:53 +0200 | [diff] [blame] | 1939 | if (css && css->parent) { |
Christoph Hellwig | 13c7863 | 2020-06-27 09:31:54 +0200 | [diff] [blame] | 1940 | bio->bi_blkg = blkg_tryget_closest(bio, css); |
Christoph Hellwig | a5b9752 | 2020-06-27 09:31:53 +0200 | [diff] [blame] | 1941 | } else { |
Pavel Begunkov | ed6cdde | 2021-10-14 15:03:30 +0100 | [diff] [blame] | 1942 | blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg); |
| 1943 | bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg; |
Christoph Hellwig | a5b9752 | 2020-06-27 09:31:53 +0200 | [diff] [blame] | 1944 | } |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1945 | } |
| 1946 | EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); |
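
/*
 * Illustrative sketch (not part of the original file): a caller that
 * already holds an explicit css, for example writeback billing a bio to
 * the cgroup that dirtied the inode, tags the bio directly.  The helper
 * is hypothetical; only the bio_associate_blkg_from_css() call is real.
 */
static inline void blkcg_example_bill_bio_to_css(struct bio *bio,
						 struct cgroup_subsys_state *css)
{
	/* Takes a blkg reference that is released when @bio is freed. */
	bio_associate_blkg_from_css(bio, css);
}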
| 1947 | |
| 1948 | /** |
| 1949 | * bio_associate_blkg - associate a bio with a blkg |
| 1950 | * @bio: target bio |
| 1951 | * |
| 1952 | * Associate @bio with the blkg found from the bio's css and request_queue. |
| 1953 | * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is |
| 1954 | * already associated, the css is reused and association redone as the |
| 1955 | * request_queue may have changed. |
| 1956 | */ |
| 1957 | void bio_associate_blkg(struct bio *bio) |
| 1958 | { |
| 1959 | struct cgroup_subsys_state *css; |
| 1960 | |
| 1961 | rcu_read_lock(); |
| 1962 | |
| 1963 | if (bio->bi_blkg) |
Christoph Hellwig | bbb1ebe | 2022-04-20 06:27:17 +0200 | [diff] [blame] | 1964 | css = bio_blkcg_css(bio); |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1965 | else |
| 1966 | css = blkcg_css(); |
| 1967 | |
| 1968 | bio_associate_blkg_from_css(bio, css); |
| 1969 | |
| 1970 | rcu_read_unlock(); |
| 1971 | } |
| 1972 | EXPORT_SYMBOL_GPL(bio_associate_blkg); |
| 1973 | |
| 1974 | /** |
| 1975 | * bio_clone_blkg_association - clone blkg association from src to dst bio |
| 1976 | * @dst: destination bio |
| 1977 | * @src: source bio |
| 1978 | */ |
| 1979 | void bio_clone_blkg_association(struct bio *dst, struct bio *src) |
| 1980 | { |
Jan Kara | 22b106e | 2022-06-02 10:12:42 +0200 | [diff] [blame] | 1981 | if (src->bi_blkg) |
| 1982 | bio_associate_blkg_from_css(dst, bio_blkcg_css(src)); |
Christoph Hellwig | 28fc591 | 2020-06-27 09:31:51 +0200 | [diff] [blame] | 1983 | } |
| 1984 | EXPORT_SYMBOL_GPL(bio_clone_blkg_association); |
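
/*
 * Illustrative sketch (not part of the original file): clone and split
 * paths propagate the association so the new bio is billed to the same
 * cgroup as the original.  The helper below is hypothetical; the real
 * callers live in the bio clone helpers in block/bio.c.
 */
static inline void blkcg_example_setup_clone(struct bio *clone,
					     struct bio *orig)
{
	/* No-op when @orig carries no blkg association. */
	bio_clone_blkg_association(clone, orig);
}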
| 1985 | |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 1986 | static int blk_cgroup_io_type(struct bio *bio) |
| 1987 | { |
| 1988 | if (op_is_discard(bio->bi_opf)) |
| 1989 | return BLKG_IOSTAT_DISCARD; |
| 1990 | if (op_is_write(bio->bi_opf)) |
| 1991 | return BLKG_IOSTAT_WRITE; |
| 1992 | return BLKG_IOSTAT_READ; |
| 1993 | } |
| 1994 | |
| 1995 | void blk_cgroup_bio_start(struct bio *bio) |
| 1996 | { |
| 1997 | int rwd = blk_cgroup_io_type(bio), cpu; |
| 1998 | struct blkg_iostat_set *bis; |
Tejun Heo | 3c08b09 | 2021-10-14 13:20:22 -1000 | [diff] [blame] | 1999 | unsigned long flags; |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 2000 | |
| 2001 | cpu = get_cpu(); |
| 2002 | bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); |
Tejun Heo | 3c08b09 | 2021-10-14 13:20:22 -1000 | [diff] [blame] | 2003 | flags = u64_stats_update_begin_irqsave(&bis->sync); |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 2004 | |
| 2005 | /* |
| 2006 | * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split |
| 2007 | * bio and we would have already accounted for the size of the bio. |
| 2008 | */ |
| 2009 | if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { |
| 2010 | bio_set_flag(bio, BIO_CGROUP_ACCT); |
Colin Ian King | 0b8cc25 | 2020-06-30 16:54:41 +0100 | [diff] [blame] | 2011 | bis->cur.bytes[rwd] += bio->bi_iter.bi_size; |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 2012 | } |
| 2013 | bis->cur.ios[rwd]++; |
| 2014 | |
Tejun Heo | 3c08b09 | 2021-10-14 13:20:22 -1000 | [diff] [blame] | 2015 | u64_stats_update_end_irqrestore(&bis->sync, flags); |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 2016 | if (cgroup_subsys_on_dfl(io_cgrp_subsys)) |
| 2017 | cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); |
| 2018 | put_cpu(); |
| 2019 | } |
| 2020 | |
Christoph Hellwig | 216889a | 2022-04-20 06:27:13 +0200 | [diff] [blame] | 2021 | bool blk_cgroup_congested(void) |
| 2022 | { |
| 2023 | struct cgroup_subsys_state *css; |
| 2024 | bool ret = false; |
| 2025 | |
| 2026 | rcu_read_lock(); |
Christoph Hellwig | d200ca1 | 2022-04-20 06:27:21 +0200 | [diff] [blame] | 2027 | for (css = blkcg_css(); css; css = css->parent) { |
Christoph Hellwig | 216889a | 2022-04-20 06:27:13 +0200 | [diff] [blame] | 2028 | if (atomic_read(&css->cgroup->congestion_count)) { |
| 2029 | ret = true; |
| 2030 | break; |
| 2031 | } |
Christoph Hellwig | 216889a | 2022-04-20 06:27:13 +0200 | [diff] [blame] | 2032 | } |
| 2033 | rcu_read_unlock(); |
| 2034 | return ret; |
| 2035 | } |
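
/*
 * Illustrative sketch (not part of the original file): callers use the
 * congestion check above to back off optional IO.  Swap readahead, for
 * example, skips speculative reads under congestion in roughly this
 * (hypothetical) form.
 */
static inline bool blkcg_example_skip_optional_io(void)
{
	/* When the current blkcg is congested, issue only demanded IO. */
	return blk_cgroup_congested();
}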
| 2036 | |
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 2037 | static int __init blkcg_init(void) |
| 2038 | { |
| 2039 | blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", |
| 2040 | WQ_MEM_RECLAIM | WQ_FREEZABLE | |
| 2041 | WQ_UNBOUND | WQ_SYSFS, 0); |
| 2042 | if (!blkcg_punt_bio_wq) |
| 2043 | return -ENOMEM; |
| 2044 | return 0; |
| 2045 | } |
| 2046 | subsys_initcall(blkcg_init); |
| 2047 | |
Josef Bacik | 903d23f | 2018-07-03 11:14:52 -0400 | [diff] [blame] | 2048 | module_param(blkcg_debug_stats, bool, 0644); |
| 2049 | MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not"); |