Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Suspend support specific for s390. |
| 3 | * |
| 4 | * Copyright IBM Corp. 2009 |
| 5 | * |
| 6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> |
| 7 | */ |
| 8 | |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 9 | #include <linux/pfn.h> |
Martin Schwidefsky | 638ad34 | 2011-10-30 15:17:13 +0100 | [diff] [blame] | 10 | #include <linux/suspend.h> |
Martin Schwidefsky | 85055dd | 2011-08-17 20:42:24 +0200 | [diff] [blame] | 11 | #include <linux/mm.h> |
David Howells | a0616cd | 2012-03-28 18:30:02 +0100 | [diff] [blame] | 12 | #include <asm/ctl_reg.h> |
Sebastian Ott | 77e844b | 2013-08-29 19:36:49 +0200 | [diff] [blame] | 13 | #include <asm/ipl.h> |
| 14 | #include <asm/cio.h> |
Sebastian Ott | 57b5918 | 2013-08-29 19:40:01 +0200 | [diff] [blame] | 15 | #include <asm/pci.h> |
Geert Uytterhoeven | 7f8998c | 2014-10-09 15:30:30 -0700 | [diff] [blame] | 16 | #include <asm/sections.h> |
Heiko Carstens | 63df41d6 | 2013-09-06 19:10:48 +0200 | [diff] [blame] | 17 | #include "entry.h" |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 18 | |
| 19 | /* |
Martin Schwidefsky | 85055dd | 2011-08-17 20:42:24 +0200 | [diff] [blame] | 20 | * The restore of the saved pages in an hibernation image will set |
| 21 | * the change and referenced bits in the storage key for each page. |
| 22 | * Overindication of the referenced bits after an hibernation cycle |
| 23 | * does not cause any harm but the overindication of the change bits |
| 24 | * would cause trouble. |
| 25 | * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each |
| 26 | * page to the most significant byte of the associated page frame |
| 27 | * number in the hibernation image. |
| 28 | */ |
| 29 | |
/*
 * Key storage is allocated as a linked list of pages.
 * The size of the keys array is (PAGE_SIZE - sizeof(long))
 */
struct page_key_data {
	struct page_key_data *next;	/* next array in the list, NULL terminated */
	unsigned char data[];		/* one storage key byte per image page */
};

/* Number of key bytes that fit in one page after the list pointer. */
#define PAGE_KEY_DATA_SIZE	(PAGE_SIZE - sizeof(struct page_key_data *))

static struct page_key_data *page_key_data;	/* head of the key array list */
static struct page_key_data *page_key_rp, *page_key_wp;	/* read/write array cursors */
static unsigned long page_key_rx, page_key_wx;	/* read/write index within the cursor array */
/* Scratch pages used by swsusp_arch_suspend/resume; sized LC_ORDER. */
unsigned long suspend_zero_pages;
Martin Schwidefsky | 85055dd | 2011-08-17 20:42:24 +0200 | [diff] [blame] | 45 | |
| 46 | /* |
| 47 | * For each page in the hibernation image one additional byte is |
| 48 | * stored in the most significant byte of the page frame number. |
| 49 | * On suspend no additional memory is required but on resume the |
| 50 | * keys need to be memorized until the page data has been restored. |
| 51 | * Only then can the storage keys be set to their old state. |
| 52 | */ |
| 53 | unsigned long page_key_additional_pages(unsigned long pages) |
| 54 | { |
| 55 | return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); |
| 56 | } |
| 57 | |
| 58 | /* |
| 59 | * Free page_key_data list of arrays. |
| 60 | */ |
| 61 | void page_key_free(void) |
| 62 | { |
| 63 | struct page_key_data *pkd; |
| 64 | |
| 65 | while (page_key_data) { |
| 66 | pkd = page_key_data; |
| 67 | page_key_data = pkd->next; |
| 68 | free_page((unsigned long) pkd); |
| 69 | } |
| 70 | } |
| 71 | |
| 72 | /* |
| 73 | * Allocate page_key_data list of arrays with enough room to store |
| 74 | * one byte for each page in the hibernation image. |
| 75 | */ |
| 76 | int page_key_alloc(unsigned long pages) |
| 77 | { |
| 78 | struct page_key_data *pk; |
| 79 | unsigned long size; |
| 80 | |
| 81 | size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); |
| 82 | while (size--) { |
| 83 | pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL); |
| 84 | if (!pk) { |
| 85 | page_key_free(); |
| 86 | return -ENOMEM; |
| 87 | } |
| 88 | pk->next = page_key_data; |
| 89 | page_key_data = pk; |
| 90 | } |
| 91 | page_key_rp = page_key_wp = page_key_data; |
| 92 | page_key_rx = page_key_wx = 0; |
| 93 | return 0; |
| 94 | } |
| 95 | |
/*
 * Save the storage key into the upper 8 bits of the page frame number.
 * The pfn is overwritten in place; swsusp stores it in the image.
 */
void page_key_read(unsigned long *pfn)
{
	struct page *page = pfn_to_page(*pfn);
	unsigned long addr = (unsigned long) page_address(page);

	*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}
| 106 | |
| 107 | /* |
| 108 | * Extract the storage key from the upper 8 bits of the page frame number |
| 109 | * and store it in the page_key_data list of arrays. |
| 110 | */ |
| 111 | void page_key_memorize(unsigned long *pfn) |
| 112 | { |
| 113 | page_key_wp->data[page_key_wx] = *(unsigned char *) pfn; |
| 114 | *(unsigned char *) pfn = 0; |
| 115 | if (++page_key_wx < PAGE_KEY_DATA_SIZE) |
| 116 | return; |
| 117 | page_key_wp = page_key_wp->next; |
| 118 | page_key_wx = 0; |
| 119 | } |
| 120 | |
| 121 | /* |
| 122 | * Get the next key from the page_key_data list of arrays and set the |
| 123 | * storage key of the page referred by @address. If @address refers to |
| 124 | * a "safe" page the swsusp_arch_resume code will transfer the storage |
| 125 | * key from the buffer page to the original page. |
| 126 | */ |
| 127 | void page_key_write(void *address) |
| 128 | { |
| 129 | page_set_storage_key((unsigned long) address, |
| 130 | page_key_rp->data[page_key_rx], 0); |
| 131 | if (++page_key_rx >= PAGE_KEY_DATA_SIZE) |
| 132 | return; |
| 133 | page_key_rp = page_key_rp->next; |
| 134 | page_key_rx = 0; |
| 135 | } |
| 136 | |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 137 | int pfn_is_nosave(unsigned long pfn) |
| 138 | { |
Heiko Carstens | 2573a57 | 2009-09-22 22:58:50 +0200 | [diff] [blame] | 139 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
| 140 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 141 | |
Heiko Carstens | 2573a57 | 2009-09-22 22:58:50 +0200 | [diff] [blame] | 142 | /* Always save lowcore pages (LC protection might be enabled). */ |
| 143 | if (pfn <= LC_PAGES) |
| 144 | return 0; |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 145 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) |
| 146 | return 1; |
Heiko Carstens | 2573a57 | 2009-09-22 22:58:50 +0200 | [diff] [blame] | 147 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ |
| 148 | if (tprot(PFN_PHYS(pfn))) |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 149 | return 1; |
| 150 | return 0; |
| 151 | } |
| 152 | |
Michael Holzheu | 91c15a9 | 2013-04-08 16:09:31 +0200 | [diff] [blame] | 153 | /* |
| 154 | * PM notifier callback for suspend |
| 155 | */ |
| 156 | static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, |
| 157 | void *ptr) |
| 158 | { |
| 159 | switch (action) { |
| 160 | case PM_SUSPEND_PREPARE: |
| 161 | case PM_HIBERNATION_PREPARE: |
| 162 | suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); |
| 163 | if (!suspend_zero_pages) |
| 164 | return NOTIFY_BAD; |
| 165 | break; |
| 166 | case PM_POST_SUSPEND: |
| 167 | case PM_POST_HIBERNATION: |
| 168 | free_pages(suspend_zero_pages, LC_ORDER); |
| 169 | break; |
| 170 | default: |
| 171 | return NOTIFY_DONE; |
| 172 | } |
| 173 | return NOTIFY_OK; |
| 174 | } |
| 175 | |
/* Register the suspend/hibernate PM notifier early during boot. */
static int __init suspend_pm_init(void)
{
	pm_notifier(suspend_pm_cb, 0);
	return 0;
}
arch_initcall(suspend_pm_init);
| 182 | |
Heiko Carstens | c48ff64 | 2009-09-11 10:28:37 +0200 | [diff] [blame] | 183 | void save_processor_state(void) |
| 184 | { |
| 185 | /* swsusp_arch_suspend() actually saves all cpu register contents. |
| 186 | * Machine checks must be disabled since swsusp_arch_suspend() stores |
| 187 | * register contents to their lowcore save areas. That's the same |
| 188 | * place where register contents on machine checks would be saved. |
| 189 | * To avoid register corruption disable machine checks. |
| 190 | * We must also disable machine checks in the new psw mask for |
| 191 | * program checks, since swsusp_arch_suspend() may generate program |
| 192 | * checks. Disabling machine checks for all other new psw masks is |
| 193 | * just paranoia. |
| 194 | */ |
| 195 | local_mcck_disable(); |
| 196 | /* Disable lowcore protection */ |
| 197 | __ctl_clear_bit(0,28); |
| 198 | S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; |
| 199 | S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; |
| 200 | S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; |
| 201 | S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; |
| 202 | } |
| 203 | |
/* Undo save_processor_state() in reverse order: re-enable machine
 * checks in the new-PSW masks, then lowcore protection, then local
 * machine-check handling. */
void restore_processor_state(void)
{
	S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
	/* Enable lowcore protection */
	__ctl_set_bit(0,28);
	local_mcck_enable();
}

/* Called at the end of swsusp_arch_resume */
void s390_early_resume(void)
{
	/* Re-log the linux-guest-relocation info after the image switch. */
	lgr_info_log();
	/* Bring the channel subsystem back up before touching PCI. */
	channel_subsystem_reinit();
	/* Rediscover PCI functions that may have changed across resume. */
	zpci_rescan();
}