/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

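/*
 * Each cftype's ->private packs two values: the hstate index in the upper
 * 16 bits and a resource attribute (one of the RES_* values defined below,
 * or a flag) in the lower 16 bits. MEMFILE_IDX()/MEMFILE_ATTR() unpack them.
 */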
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

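/*
 * Each hugetlb_cgroup carries two page_counter arrays per hstate:
 * hugepage[] tracks pages actually faulted in, while rsvd_hugepage[]
 * tracks reserved pages. The helpers below select between the two
 * based on @rsvd.
 */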
static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
		    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}

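/*
 * Set up the per-hstate counters for a new cgroup. Each counter is parented
 * to the corresponding counter of @parent_h_cgroup (NULL for the root),
 * which is what makes charges propagate hierarchically, and its maximum is
 * capped at PAGE_COUNTER_MAX rounded down to a whole number of huge pages.
 */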
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

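/*
 * Allocate a hugetlb_cgroup together with its trailing per-node array
 * (hence struct_size() with nr_node_ids); nodeinfo[] feeds the numa_stat
 * files. The first cgroup allocated (the one with no parent) becomes
 * root_h_cgroup.
 */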
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste a lot of memory for nodes that will
	 * never be onlined. It would be better to use a memory hotplug
	 * callback instead.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference and test for page activity here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(folio, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

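/*
 * Record a hugetlb memory event: bump the local counter for this cgroup
 * only, then walk up the tree bumping the hierarchical counter of every
 * ancestor short of the root, notifying the matching events files along
 * the way.
 */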
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

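/*
 * Charge @nr_pages against the current task's cgroup. The css_tryget()
 * retry loop guards against racing with task migration: if the css is on
 * its way out, reread the task's cgroup and try again. On failure a
 * HUGETLB_MAX event is recorded. In the !rsvd case the css reference is
 * dropped before returning; reservations keep theirs (see comment below).
 */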
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(folio, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					    struct folio *folio, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				   struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

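/*
 * Uncharge a resv_map's reservation counter for the range [start, end)
 * of huge pages and drop the css reference the reservation was holding.
 */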
void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

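/*
 * Emit the hugetlb.<size>.numa_stat file. The cgroup v2 flavor prints a
 * single "total=<bytes> N0=<bytes> N1=<bytes> ..." line of hierarchical
 * usage; the legacy (v1) flavor prints a non-hierarchical line first and
 * prefixes the hierarchical one with "hierarchical_".
 */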
static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

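/*
 * Common limit-write handler for both hierarchies. @max is the keyword
 * page_counter_memparse() treats as "no limit": "-1" on the legacy files,
 * "max" on the cgroup v2 files. The mutex serializes concurrent limit
 * updates. The parsed byte count is rounded down to a whole number of
 * huge pages of the file's hstate before being applied.
 */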
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

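/*
 * Render a huge page size as the string used in control file names,
 * e.g. 2MB pages yield "2MB" (giving names such as
 * "hugetlb.2MB.limit_in_bytes") and 1GB pages yield "1GB".
 */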
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

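/*
 * Build the cgroup v2 control files for hstates[idx]: hugetlb.<size>.max,
 * .rsvd.max, .current, .rsvd.current, .events, .events.local and
 * .numa_stat, all hidden on the root cgroup (CFTYPE_NOT_ON_ROOT).
 * A rough usage sketch, assuming a 2MB hstate, cgroup v2 mounted at
 * /sys/fs/cgroup and a hypothetical child group "mygrp":
 *
 *   echo 1G > /sys/fs/cgroup/mygrp/hugetlb.2MB.max
 *   cat /sys/fs/cgroup/mygrp/hugetlb.2MB.current
 */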
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h)
		__hugetlb_cgroup_file_init(hstate_index(h));
}

/*
 * hugetlb_lock makes sure a parallel cgroup rmdir won't happen
 * while we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* Move the h_cg details to the new folio */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

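/*
 * The per-hstate control files are generated at boot by
 * hugetlb_cgroup_file_init() above, since huge page sizes are not known
 * at compile time; this static array therefore carries only the
 * terminating entry.
 */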
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};