#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
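
/*
 * Illustrative use of power_attr() (a sketch, not part of this header): the
 * macro expects a matching pair of <name>_show()/<name>_store() handlers to
 * be defined in the .c file that invokes it.  For an attribute backed by the
 * image_size variable declared below, that would look roughly like:
 *
 *	static ssize_t image_size_show(struct kobject *kobj,
 *				       struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", image_size);
 *	}
 *
 *	static ssize_t image_size_store(struct kobject *kobj,
 *					struct kobj_attribute *attr,
 *					const char *buf, size_t n)
 *	{
 *		unsigned long size;
 *
 *		if (sscanf(buf, "%lu", &size) == 1) {
 *			image_size = size;
 *			return n;
 *		}
 *		return -EINVAL;
 *	}
 *
 *	power_attr(image_size);
 *
 * The macro then defines a struct kobj_attribute named image_size_attr,
 * wired to the two handlers above and ready to be added to a sysfs
 * attribute group.
 */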

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs), which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to user space
 * is required to use snapshot_read_next() for this purpose and it should
 * not make any assumptions regarding the internal structure of the image.
 * Similarly, the code that reads the image from storage or transfers it
 * from user space is required to use snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (i.e. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
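
/*
 * Illustrative reader loop (a sketch only, not part of this header): code
 * that saves the image is expected to pull it one PAGE_SIZE chunk at a time
 * with snapshot_read_next() and copy from data_of() without interpreting the
 * contents.  A positive return value means a chunk is ready at data_of(),
 * zero means the whole image has been transferred, and a negative value is
 * an error.  write_page() below is a placeholder for whatever actually
 * stores the data, and error handling is abbreviated.
 *
 *	struct snapshot_handle snapshot;
 *	int error;
 *
 *	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 *	for (;;) {
 *		error = snapshot_read_next(&snapshot);
 *		if (error <= 0)
 *			break;
 *		error = write_page(data_of(snapshot));
 *		if (error)
 *			break;
 *	}
 */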

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails.  So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure.  So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif
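
/*
 * Illustrative call pattern (a sketch only, not part of this header): the
 * suspend core is expected to bracket the actual transition with the
 * freeze/thaw helpers above, roughly as follows, where "state" is the
 * requested suspend_state_t:
 *
 *	error = suspend_freeze_processes();
 *	if (!error) {
 *		error = suspend_devices_and_enter(state);
 *		suspend_thaw_processes();
 *	}
 */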