#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* kernel/power/hibernate.c */
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
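/*
 * Usage sketch, for illustration only (not part of the original header):
 * power_attr(foo) expects foo_show() and foo_store() to be defined first and
 * emits a struct kobj_attribute named foo_attr, suitable for an attribute
 * group exposed under /sys/power.  The name "foo" and the bodies below are
 * hypothetical; kernel/power/main.c contains the real users of this macro.
 *
 *	static int foo_value;
 *
 *	static ssize_t foo_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", foo_value);
 *	}
 *
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t n)
 *	{
 *		if (sscanf(buf, "%d", &foo_value) != 1)
 *			return -EINVAL;
 *		return n;
 *	}
 *
 *	power_attr(foo);
 *
 * The resulting foo_attr is a sysfs attribute with mode 0644.
 */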
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs), which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (i.e. the
				 * current one)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/*
 * This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns.
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;
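/*
 * Illustrative sketch (not part of the original header): streaming the image
 * out one PAGE_SIZE block at a time with snapshot_read_next() and data_of().
 * A positive return value means another block is ready in the handle's
 * buffer, zero means the whole image has been transferred, and a negative
 * value is an error code.  The write_page() helper is hypothetical; the real
 * consumers of this interface are kernel/power/swap.c and kernel/power/user.c.
 *
 *	struct snapshot_handle handle;
 *	int error;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((error = snapshot_read_next(&handle)) > 0) {
 *		error = write_page(data_of(handle));
 *		if (error)
 *			break;
 *	}
 */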
extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
extern int enter_state(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	return freeze_processes();
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif