// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/proc_fs.h>

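/* Global XFS statistics; its xs_stats member points at the per-CPU counters. */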
struct xstats xfsstats;

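/*
 * Sum the __u32 counter at index @idx of the per-CPU statistics array across
 * all possible CPUs.  The counters are read without any locking, so the
 * result is an approximate snapshot rather than an exact total.
 */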
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

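/*
 * Format the statistics into @buf: one line per group, followed by the
 * extra-precision byte counters ("xpc") and a "debug" flag line.  The caller
 * is assumed to provide a buffer of at least PATH_MAX bytes.
 *
 * Illustrative output format (counter values elided):
 *
 *	extent_alloc <counters...>
 *	abt <counters...>
 *	...
 *	qm <counters...>
 *	xpc <xstrat_bytes> <write_bytes> <read_bytes>
 *	debug 0
 */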
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int i, j;
	int len = 0;
	uint64_t xs_xstrat_bytes = 0;
	uint64_t xs_write_bytes = 0;
	uint64_t xs_read_bytes = 0;

	static const struct xstats_entry {
		char *desc;
		int endpoint;
	} xstats[] = {
		{ "extent_alloc",	XFSSTAT_END_EXTENT_ALLOC	},
		{ "abt",		XFSSTAT_END_ALLOC_BTREE		},
		{ "blk_map",		XFSSTAT_END_BLOCK_MAPPING	},
		{ "bmbt",		XFSSTAT_END_BLOCK_MAP_BTREE	},
		{ "dir",		XFSSTAT_END_DIRECTORY_OPS	},
		{ "trans",		XFSSTAT_END_TRANSACTIONS	},
		{ "ig",			XFSSTAT_END_INODE_OPS		},
		{ "log",		XFSSTAT_END_LOG_OPS		},
		{ "push_ail",		XFSSTAT_END_TAIL_PUSHING	},
		{ "xstrat",		XFSSTAT_END_WRITE_CONVERT	},
		{ "rw",			XFSSTAT_END_READ_WRITE_OPS	},
		{ "attr",		XFSSTAT_END_ATTRIBUTE_OPS	},
		{ "icluster",		XFSSTAT_END_INODE_CLUSTER	},
		{ "vnodes",		XFSSTAT_END_VNODE_OPS		},
		{ "buf",		XFSSTAT_END_BUF			},
		{ "abtb2",		XFSSTAT_END_ABTB_V2		},
		{ "abtc2",		XFSSTAT_END_ABTC_V2		},
		{ "bmbt2",		XFSSTAT_END_BMBT_V2		},
		{ "ibt2",		XFSSTAT_END_IBT_V2		},
		{ "fibt2",		XFSSTAT_END_FIBT_V2		},
		{ "rmapbt",		XFSSTAT_END_RMAP_V2		},
		{ "refcntbt",		XFSSTAT_END_REFCOUNT		},
		/* we print both series of quota information together */
		{ "qm",			XFSSTAT_END_QM			},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += snprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += snprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += snprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
	}

	len += snprintf(buf + len, PATH_MAX - len, "xpc %Lu %Lu %Lu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += snprintf(buf + len, PATH_MAX - len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

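/*
 * Zero every per-CPU copy of the statistics.  vn_active is a live count of
 * currently-active vnodes rather than an event counter, so it is saved and
 * restored around the memset instead of being cleared.
 */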
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int c;
	uint32_t vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA
static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_printf(m, "qm");
	for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

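/*
 * Create the /proc/fs/xfs directory: "stat" is a symlink to the sysfs
 * statistics file, and the legacy quota files are added when
 * CONFIG_XFS_QUOTA is enabled.  On any failure the whole subtree is torn
 * down again.
 */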
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */