// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 *		     are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING	"little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING	"big"
#else
#error Unknown byteorder
#endif

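/*
 * Helper macros: KERNEL_ATTR_RO(foo) declares foo_attr backed by foo_show(),
 * KERNEL_ATTR_RW(foo) additionally wires up foo_store().
 */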
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);

/* address bits */
static ssize_t address_bits_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
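/*
 * Runtime control for kernel profiling: reads report whether profiling is
 * active; a write takes a "profile=" style option string and enables it.
 * Once enabled, profiling cannot be reconfigured here (-EEXIST).
 */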
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const.  It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
#endif

#ifdef CONFIG_KEXEC_CORE
static ssize_t kexec_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

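/*
 * Size of the memory reserved for the crash kernel; writing a smaller
 * value shrinks the reservation and frees the difference.
 */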
static ssize_t kexec_crash_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)
		return size;

	return sprintf(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_CORE

static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
	return sprintf(buf, "%pa %x\n", &vmcore_base,
		       (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#endif /* CONFIG_CRASH_CORE */

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
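/*
 * Runtime knobs for RCU grace periods: rcu_expedited requests expedited
 * grace periods, rcu_normal forces normal ones even where expediting
 * would otherwise be used.
 */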
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_expedited))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_normal))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes __weak;
extern const void __stop_notes __weak;
#define notes_size (&__stop_notes - &__start_notes)

static ssize_t notes_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	memcpy(buf, &__start_notes + off, count);
	return count;
}

static struct bin_attribute notes_attr __ro_after_init = {
	.attr = {
		.name = "notes",
		.mode = S_IRUGO,
	},
	.read = &notes_read,
};

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
	&kexec_loaded_attr.attr,
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
#endif
#ifdef CONFIG_CRASH_CORE
	&vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};

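/* Create /sys/kernel, populate it with the attributes above and the notes file. */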
static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		notes_attr.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

core_initcall(ksysfs_init);