1 /*
2  * linux/kernel/power/swsusp.c
3  *
4  * This file implements the architecture-independent machine suspend
5  * feature, using almost exclusively high-level routines.
6  *
7  * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
8  * Copyright (C) 1998,2001-2004 Pavel Machek <pavel@suse.cz>
9  *
10  * This file is released under the GPLv2.
11  *
12  * I'd like to thank the following people for their work:
13  * 
14  * Pavel Machek <pavel@ucw.cz>:
15  * Modifications, defectiveness pointing, being with me at the very beginning,
16  * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
17  *
18  * Steve Doddi <dirk@loth.demon.co.uk>: 
19  * Support the possibility of hardware state restoring.
20  *
21  * Raph <grey.havens@earthling.net>:
22  * Support for preserving states of network devices and virtual console
23  * (including X and svgatextmode)
24  *
25  * Kurt Garloff <garloff@suse.de>:
26  * Straightened the critical function in order to prevent compilers from
27  * playing tricks with local variables.
28  *
29  * Andreas Mohr <a.mohr@mailto.de>
30  *
31  * Alex Badea <vampire@go.ro>:
32  * Fixed runaway init
33  *
34  * More state savers are welcome. Especially for the scsi layer...
35  *
36  * For TODOs and FIXMEs, also look in Documentation/power/swsusp.txt
37  */
38
39 #include <linux/module.h>
40 #include <linux/mm.h>
41 #include <linux/suspend.h>
42 #include <linux/smp_lock.h>
43 #include <linux/file.h>
44 #include <linux/utsname.h>
45 #include <linux/version.h>
46 #include <linux/delay.h>
47 #include <linux/reboot.h>
48 #include <linux/bitops.h>
49 #include <linux/vt_kern.h>
50 #include <linux/kbd_kern.h>
51 #include <linux/keyboard.h>
52 #include <linux/spinlock.h>
53 #include <linux/genhd.h>
54 #include <linux/kernel.h>
55 #include <linux/major.h>
56 #include <linux/swap.h>
57 #include <linux/pm.h>
58 #include <linux/device.h>
59 #include <linux/buffer_head.h>
60 #include <linux/swapops.h>
61 #include <linux/bootmem.h>
62 #include <linux/syscalls.h>
63 #include <linux/console.h>
64 #include <linux/highmem.h>
65 #include <linux/bio.h>
66
67 #include <asm/uaccess.h>
68 #include <asm/mmu_context.h>
69 #include <asm/pgtable.h>
70 #include <asm/tlbflush.h>
71 #include <asm/io.h>
72
73 #include "power.h"
74
75 /* References to section boundaries */
76 extern const void __nosave_begin, __nosave_end;
77
78 /* Variables to be preserved over suspend */
79 static int nr_copy_pages_check;
80
81 extern char resume_file[];
82
83 /* Local variables that should not be affected by save */
84 unsigned int nr_copy_pages __nosavedata = 0;
85
86 /* The suspend pagedir is allocated before the final copy, therefore it
87    must be freed after resume.
88
89    Warning: this is evil. There are actually two pagedirs at resume
90    time. One is "pagedir_save", an empty frame allocated at suspend
91    time, which must be freed. The second is "pagedir_nosave",
92    allocated at resume time, which travels through memory so as not to
93    collide with anything.
94
95    Warning: this is even more evil than it seems. The pagedirs this
96    file talks about are completely different from the page directories
97    used by the MMU hardware.
98  */
99 suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
100 static suspend_pagedir_t *pagedir_save;
101
102 #define SWSUSP_SIG      "S1SUSPEND"
103
104 static struct swsusp_header {
105         char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
106         swp_entry_t swsusp_info;
107         char    orig_sig[10];
108         char    sig[10];
109 } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
110
111 static struct swsusp_info swsusp_info;
112
113 /*
114  * XXX: We try to keep some extra pages free so that I/O operations succeed
115  * without paging. Should this number be higher?
116  */
117 #define PAGES_FOR_IO    512
118
119 /*
120  * Saving part...
121  */
122
123 /* We record in swapfile_used[] which swap devices are used for suspend */
124 #define SWAPFILE_UNUSED    0
125 #define SWAPFILE_SUSPEND   1    /* This is the suspending device */
126 #define SWAPFILE_IGNORED   2    /* Other swap devices, ignored for suspend */
127
128 static unsigned short swapfile_used[MAX_SWAPFILES];
129 static unsigned short root_swap;
130
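/**
 *      mark_swapfiles - Write the swsusp signature to the suspend device.
 *      @prev:  Swap entry of the page holding the image header (swsusp_info).
 *
 *      Read the first page of the suspend swap device; if it carries a valid
 *      swap signature, save that signature in orig_sig and replace it with
 *      SWSUSP_SIG plus a pointer to the image header, so that the resume
 *      code can find the image later.
 */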
131 static int mark_swapfiles(swp_entry_t prev)
132 {
133         int error;
134
135         rw_swap_page_sync(READ, 
136                           swp_entry(root_swap, 0),
137                           virt_to_page((unsigned long)&swsusp_header));
138         if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
139             !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
140                 memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
141                 memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
142                 swsusp_header.swsusp_info = prev;
143                 error = rw_swap_page_sync(WRITE, 
144                                           swp_entry(root_swap, 0),
145                                           virt_to_page((unsigned long)
146                                                        &swsusp_header));
147         } else {
148                 pr_debug("swsusp: Partition is not swap space.\n");
149                 error = -ENODEV;
150         }
151         return error;
152 }
153
154 /*
155  * Check whether the swap device is the specified resume
156  * device, irrespective of whether they are specified by
157  * identical names.
158  *
159  * (Thus, device inode aliasing is allowed.  You can say /dev/hda4
160  * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs]
161  * and they'll be considered the same device.  This is *necessary* for
162  * devfs, since the resume code can only recognize the form /dev/hda4,
163  * but the suspend code would see the long name.)
164  */
165 static int is_resume_device(const struct swap_info_struct *swap_info)
166 {
167         struct file *file = swap_info->swap_file;
168         struct inode *inode = file->f_dentry->d_inode;
169
170         return S_ISBLK(inode->i_mode) &&
171                 swsusp_resume_device == MKDEV(imajor(inode), iminor(inode));
172 }
173
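/**
 *      swsusp_swap_check - Choose the swap device the image will be written to.
 *
 *      Walk swap_info[] and classify every swap device as unused, the suspend
 *      device or ignored.  The suspend device is the one matching the resume=
 *      option; if no resume= option was given, the first active swap device
 *      is used and a warning is printed.
 */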
174 static int swsusp_swap_check(void) /* This is called before saving image */
175 {
176         int i, len;
177         
178         len=strlen(resume_file);
179         root_swap = 0xFFFF;
180         
181         swap_list_lock();
182         for (i = 0; i < MAX_SWAPFILES; i++) {
183                 if (swap_info[i].flags == 0) {
184                         swapfile_used[i] = SWAPFILE_UNUSED;
185                 } else {
186                         if (!len) {
187                                 printk(KERN_WARNING "resume= option should be used to set suspend device\n");
188                                 if (root_swap == 0xFFFF) {
189                                         swapfile_used[i] = SWAPFILE_SUSPEND;
190                                         root_swap = i;
191                                 } else
192                                         swapfile_used[i] = SWAPFILE_IGNORED;
193                         } else {
194                                 /* we ignore all swap devices that are not the resume_file */
195                                 if (is_resume_device(&swap_info[i])) {
196                                         swapfile_used[i] = SWAPFILE_SUSPEND;
197                                         root_swap = i;
198                                 } else {
199                                         swapfile_used[i] = SWAPFILE_IGNORED;
200                                 }
201                         }
202                 }
203         }
204         swap_list_unlock();
205         return (root_swap != 0xffff) ? 0 : -ENODEV;
206 }
207
208 /**
209  * lock_swapdevices - Make the ignored swap devices unusable.
210  * This is called after saving the image, so the modification will be
211  * lost after resume... and that's what we want: we make the devices
212  * unusable. A new call to lock_swapdevices() unlocks them again.
213  */
214 static void lock_swapdevices(void)
215 {
216         int i;
217
218         swap_list_lock();
219         for (i = 0; i < MAX_SWAPFILES; i++)
220                 if (swapfile_used[i] == SWAPFILE_IGNORED) {
221                         swap_info[i].flags ^= 0xFF;
222                 }
223         swap_list_unlock();
224 }
225
226 /**
227  *      write_page - Write one page to a fresh swap location.
228  *      @addr:  Address we're writing.
229  *      @loc:   Place to store the entry we used.
230  *
231  *      Allocate a new swap entry and 'sync' it. Note we discard -EIO
232  *      errors. That is an artifact left over from the original swsusp code,
233  *      which did not check the return of rw_swap_page_sync() at all, since
234  *      most pages written back to swap would return -EIO.
235  *      This is a partial improvement, since we will at least return other
236  *      errors, though we need to eventually fix the damn code.
237  */
238 static int write_page(unsigned long addr, swp_entry_t * loc)
239 {
240         swp_entry_t entry;
241         int error = 0;
242
243         entry = get_swap_page();
244         if (swp_offset(entry) && 
245             swapfile_used[swp_type(entry)] == SWAPFILE_SUSPEND) {
246                 error = rw_swap_page_sync(WRITE, entry,
247                                           virt_to_page(addr));
248                 if (error == -EIO)
249                         error = 0;
250                 if (!error)
251                         *loc = entry;
252         } else
253                 error = -ENOSPC;
254         return error;
255 }
256
257 /**
258  *      data_free - Free the swap entries used by the saved image.
259  *
260  *      Walk the list of used swap entries and free each one. 
261  *      This is only used for cleanup when suspend fails.
262  */
263 static void data_free(void)
264 {
265         swp_entry_t entry;
266         int i;
267
268         for (i = 0; i < nr_copy_pages; i++) {
269                 entry = (pagedir_nosave + i)->swap_address;
270                 if (entry.val)
271                         swap_free(entry);
272                 else
273                         break;
274                 (pagedir_nosave + i)->swap_address = (swp_entry_t){0};
275         }
276 }
277
278 /**
279  *      data_write - Write saved image to swap.
280  *
281  *      Walk the list of pages in the image and sync each one to swap.
282  */
283 static int data_write(void)
284 {
285         int error = 0, i = 0;
286         unsigned int mod = nr_copy_pages / 100;
287         struct pbe *p;
288
289         if (!mod)
290                 mod = 1;
291
292         printk( "Writing data to swap (%d pages)...     ", nr_copy_pages );
293         for_each_pbe(p, pagedir_nosave) {
294                 if (!(i%mod))
295                         printk( "\b\b\b\b%3d%%", i / mod );
296                 if ((error = write_page(p->address, &(p->swap_address))))
297                         return error;
298                 i++;
299         }
300         printk("\b\b\b\bdone\n");
301         return error;
302 }
303
304 static void dump_info(void)
305 {
306         pr_debug(" swsusp: Version: %u\n",swsusp_info.version_code);
307         pr_debug(" swsusp: Num Pages: %ld\n",swsusp_info.num_physpages);
308         pr_debug(" swsusp: UTS Sys: %s\n",swsusp_info.uts.sysname);
309         pr_debug(" swsusp: UTS Node: %s\n",swsusp_info.uts.nodename);
310         pr_debug(" swsusp: UTS Release: %s\n",swsusp_info.uts.release);
311         pr_debug(" swsusp: UTS Version: %s\n",swsusp_info.uts.version);
312         pr_debug(" swsusp: UTS Machine: %s\n",swsusp_info.uts.machine);
313         pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname);
314         pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus);
315         pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages);
316         pr_debug(" swsusp: Pagedir: %ld Pages\n",swsusp_info.pagedir_pages);
317 }
318
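/**
 *      init_header - Fill in the image header (swsusp_info).
 *
 *      Record the kernel version, the number of physical pages, the utsname
 *      data, the number of online CPUs and the image size, so that the
 *      resume code can check them against the running kernel.
 */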
319 static void init_header(void)
320 {
321         memset(&swsusp_info, 0, sizeof(swsusp_info));
322         swsusp_info.version_code = LINUX_VERSION_CODE;
323         swsusp_info.num_physpages = num_physpages;
324         memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
325
326         swsusp_info.suspend_pagedir = pagedir_nosave;
327         swsusp_info.cpus = num_online_cpus();
328         swsusp_info.image_pages = nr_copy_pages;
329 }
330
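/**
 *      close_swap - Write out the image header and mark the swap signature.
 *
 *      Write the swsusp_info page to a fresh swap location, then let
 *      mark_swapfiles() point the swap header at it, which makes the image
 *      visible to the resume code.
 */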
331 static int close_swap(void)
332 {
333         swp_entry_t entry;
334         int error;
335
336         dump_info();
337         error = write_page((unsigned long)&swsusp_info, &entry);
338         if (!error) { 
339                 printk( "S" );
340                 error = mark_swapfiles(entry);
341                 printk( "|\n" );
342         }
343         return error;
344 }
345
346 /**
347  *      free_pagedir_entries - Free pages used by the page directory.
348  *
349  *      This is used during suspend for error recovery.
350  */
351
352 static void free_pagedir_entries(void)
353 {
354         int i;
355
356         for (i = 0; i < swsusp_info.pagedir_pages; i++)
357                 swap_free(swsusp_info.pagedir[i]);
358 }
359
360
361 /**
362  *      write_pagedir - Write the array of pages holding the page directory.
363  *      The swap entries used are recorded in swsusp_info.pagedir[] for the header.
364  */
365
366 static int write_pagedir(void)
367 {
368         int error = 0;
369         unsigned n = 0;
370         struct pbe * pbe;
371
372         printk( "Writing pagedir...");
373         for_each_pb_page(pbe, pagedir_nosave) {
374                 if ((error = write_page((unsigned long)pbe, &swsusp_info.pagedir[n++])))
375                         return error;
376         }
377
378         swsusp_info.pagedir_pages = n;
379         printk("done (%u pages)\n", n);
380         return error;
381 }
382
383 /**
384  *      write_suspend_image - Write entire image and metadata.
385  *
386  */
387
388 static int write_suspend_image(void)
389 {
390         int error;
391
392         init_header();
393         if ((error = data_write()))
394                 goto FreeData;
395
396         if ((error = write_pagedir()))
397                 goto FreePagedir;
398
399         if ((error = close_swap()))
400                 goto FreePagedir;
401  Done:
402         return error;
403  FreePagedir:
404         free_pagedir_entries();
405  FreeData:
406         data_free();
407         goto Done;
408 }
409
410
411 #ifdef CONFIG_HIGHMEM
412 struct highmem_page {
413         char *data;
414         struct page *page;
415         struct highmem_page *next;
416 };
417
418 static struct highmem_page *highmem_copy;
419
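/*
 * Copy every saveable page of a highmem zone into a freshly allocated
 * lowmem buffer and link it into the highmem_copy list, so that
 * restore_highmem() can copy the data back at resume time.
 */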
420 static int save_highmem_zone(struct zone *zone)
421 {
422         unsigned long zone_pfn;
423         mark_free_pages(zone);
424         for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
425                 struct page *page;
426                 struct highmem_page *save;
427                 void *kaddr;
428                 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
429
430                 if (!(pfn%1000))
431                         printk(".");
432                 if (!pfn_valid(pfn))
433                         continue;
434                 page = pfn_to_page(pfn);
435                 /*
436                  * This condition results from rvmalloc() sans vmalloc_32()
437                  * and architectural memory reservations. This should be
438                  * corrected eventually when the cases giving rise to this
439                  * are better understood.
440                  */
441                 if (PageReserved(page)) {
442                         printk("highmem reserved page?!\n");
443                         continue;
444                 }
445                 BUG_ON(PageNosave(page));
446                 if (PageNosaveFree(page))
447                         continue;
448                 save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
449                 if (!save)
450                         return -ENOMEM;
451                 save->next = highmem_copy;
452                 save->page = page;
453                 save->data = (void *) get_zeroed_page(GFP_ATOMIC);
454                 if (!save->data) {
455                         kfree(save);
456                         return -ENOMEM;
457                 }
458                 kaddr = kmap_atomic(page, KM_USER0);
459                 memcpy(save->data, kaddr, PAGE_SIZE);
460                 kunmap_atomic(kaddr, KM_USER0);
461                 highmem_copy = save;
462         }
463         return 0;
464 }
465 #endif /* CONFIG_HIGHMEM */
466
467
468 static int save_highmem(void)
469 {
470 #ifdef CONFIG_HIGHMEM
471         struct zone *zone;
472         int res = 0;
473
474         pr_debug("swsusp: Saving Highmem\n");
475         for_each_zone(zone) {
476                 if (is_highmem(zone))
477                         res = save_highmem_zone(zone);
478                 if (res)
479                         return res;
480         }
481 #endif
482         return 0;
483 }
484
485 static int restore_highmem(void)
486 {
487 #ifdef CONFIG_HIGHMEM
488         printk("swsusp: Restoring Highmem\n");
489         while (highmem_copy) {
490                 struct highmem_page *save = highmem_copy;
491                 void *kaddr;
492                 highmem_copy = save->next;
493
494                 kaddr = kmap_atomic(save->page, KM_USER0);
495                 memcpy(kaddr, save->data, PAGE_SIZE);
496                 kunmap_atomic(kaddr, KM_USER0);
497                 free_page((long) save->data);
498                 kfree(save);
499         }
500 #endif
501         return 0;
502 }
503
504
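/*
 * pfn_is_nosave - check whether a pfn lies between __nosave_begin and
 * __nosave_end.  Pages in that range (e.g. the __nosavedata variables above)
 * are never included in the suspend image.
 */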
505 static int pfn_is_nosave(unsigned long pfn)
506 {
507         unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
508         unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
509         return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
510 }
511
512 /**
513  *      saveable - Determine whether a page should be cloned or not.
514  *      @zone:          The zone the page belongs to.
515  *      @zone_pfn:      The page's offset within that zone.
516  *
517  *      We save a page if it is Reserved but not in the statically defined
518  *      'unsaveable' range, or if it is neither reserved nor free.
519  */
520
521 static int saveable(struct zone * zone, unsigned long * zone_pfn)
522 {
523         unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
524         struct page * page;
525
526         if (!pfn_valid(pfn))
527                 return 0;
528
529         page = pfn_to_page(pfn);
530         BUG_ON(PageReserved(page) && PageNosave(page));
531         if (PageNosave(page))
532                 return 0;
533         if (PageReserved(page) && pfn_is_nosave(pfn)) {
534                 pr_debug("[nosave pfn 0x%lx]", pfn);
535                 return 0;
536         }
537         if (PageNosaveFree(page))
538                 return 0;
539
540         return 1;
541 }
542
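/**
 *      count_data_pages - Count the saveable lowmem pages.
 *
 *      Walk every non-highmem zone and count the pages accepted by
 *      saveable(); the result is stored in nr_copy_pages.
 */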
543 static void count_data_pages(void)
544 {
545         struct zone *zone;
546         unsigned long zone_pfn;
547
548         nr_copy_pages = 0;
549
550         for_each_zone(zone) {
551                 if (is_highmem(zone))
552                         continue;
553                 mark_free_pages(zone);
554                 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
555                         nr_copy_pages += saveable(zone, &zone_pfn);
556         }
557 }
558
559
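/**
 *      copy_data_pages - Copy every saveable lowmem page into the snapshot.
 *
 *      For each saveable page, record its original address in the next PBE
 *      and copy its contents into the page allocated for that PBE by
 *      alloc_image_pages().
 */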
560 static void copy_data_pages(void)
561 {
562         struct zone *zone;
563         unsigned long zone_pfn;
564         struct pbe * pbe = pagedir_nosave;
565         
566         pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
567         for_each_zone(zone) {
568                 if (is_highmem(zone))
569                         continue;
570                 mark_free_pages(zone);
571                 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
572                         if (saveable(zone, &zone_pfn)) {
573                                 struct page * page;
574                                 page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
575                                 BUG_ON(!pbe);
576                                 pbe->orig_address = (long) page_address(page);
577                                 /* copy_page is not usable for copying task structs. */
578                                 memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
579                                 pbe = pbe->next;
580                         }
581                 }
582         }
583         BUG_ON(pbe);
584 }
585
586
587 /**
588  *      calc_nr - Determine the number of pages needed for a pbe list.
589  */
590
591 static int calc_nr(int nr_copy)
592 {
593         int extra = 0;
594         int mod = !!(nr_copy % PBES_PER_PAGE);
595         int diff = (nr_copy / PBES_PER_PAGE) + mod;
596
597         do {
598                 extra += diff;
599                 nr_copy += diff;
600                 mod = !!(nr_copy % PBES_PER_PAGE);
601                 diff = (nr_copy / PBES_PER_PAGE) + mod - extra;
602         } while (diff > 0);
603
604         return nr_copy;
605 }
606
607 /**
608  *      free_pagedir - free pages allocated with alloc_pagedir()
609  */
610
611 static inline void free_pagedir(struct pbe *pblist)
612 {
613         struct pbe *pbe;
614
615         while (pblist) {
616                 pbe = (pblist + PB_PAGE_SKIP)->next;
617                 free_page((unsigned long)pblist);
618                 pblist = pbe;
619         }
620 }
621
622 /**
623  *      fill_pb_page - Create a list of PBEs on a given memory page
624  */
625
626 static inline void fill_pb_page(struct pbe *pbpage)
627 {
628         struct pbe *p;
629
630         p = pbpage;
631         pbpage += PB_PAGE_SKIP;
632         do
633                 p->next = p + 1;
634         while (++p < pbpage);
635 }
636
637 /**
638  *      create_pbe_list - Create a list of PBEs on top of a given chain
639  *      of memory pages allocated with alloc_pagedir()
640  */
641
642 static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
643 {
644         struct pbe *pbpage, *p;
645         unsigned num = PBES_PER_PAGE;
646
647         for_each_pb_page (pbpage, pblist) {
648                 if (num >= nr_pages)
649                         break;
650
651                 fill_pb_page(pbpage);
652                 num += PBES_PER_PAGE;
653         }
654         if (pbpage) {
655                 for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
656                         p->next = p + 1;
657                 p->next = NULL;
658         }
659         pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
660 }
661
662 /**
663  *      alloc_pagedir - Allocate the page directory.
664  *
665  *      First, determine exactly how many pages we need and
666  *      allocate them.
667  *
668  *      We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
669  *      struct pbe elements (pbes) and the last element in the page points
670  *      to the next page.
671  *
672  *      On each page we set up a list of struct_pbe elements.
673  */
674
675 static struct pbe * alloc_pagedir(unsigned nr_pages)
676 {
677         unsigned num;
678         struct pbe *pblist, *pbe;
679
680         if (!nr_pages)
681                 return NULL;
682
683         pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
684         pblist = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
685         for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
686                         pbe = pbe->next, num += PBES_PER_PAGE) {
687                 pbe += PB_PAGE_SKIP;
688                 pbe->next = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
689         }
690         if (!pbe) { /* get_zeroed_page() failed */
691                 free_pagedir(pblist);
692                 pblist = NULL;
693         }
694         return pblist;
695 }
696
697 /**
698  *      free_image_pages - Free pages allocated for snapshot
699  */
700
701 static void free_image_pages(void)
702 {
703         struct pbe * p;
704
705         for_each_pbe(p, pagedir_save) {
706                 if (p->address) {
707                         ClearPageNosave(virt_to_page(p->address));
708                         free_page(p->address);
709                         p->address = 0;
710                 }
711         }
712 }
713
714 /**
715  *      alloc_image_pages - Allocate pages for the snapshot.
716  */
717
718 static int alloc_image_pages(void)
719 {
720         struct pbe * p;
721
722         for_each_pbe(p, pagedir_save) {
723                 p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
724                 if (!p->address)
725                         return -ENOMEM;
726                 SetPageNosave(virt_to_page(p->address));
727         }
728         return 0;
729 }
730
731 void swsusp_free(void)
732 {
733         BUG_ON(PageNosave(virt_to_page(pagedir_save)));
734         BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
735         free_image_pages();
736         free_pagedir(pagedir_save);
737 }
738
739
740 /**
741  *      enough_free_mem - Make sure we have enough free memory for the snapshot.
742  *
743  *      Returns TRUE or FALSE after checking the number of available 
744  *      free pages.
745  */
746
747 static int enough_free_mem(void)
748 {
749         if (nr_free_pages() < (nr_copy_pages + PAGES_FOR_IO)) {
750                 pr_debug("swsusp: Not enough free pages: Have %d\n",
751                          nr_free_pages());
752                 return 0;
753         }
754         return 1;
755 }
756
757
758 /**
759  *      enough_swap - Make sure we have enough swap to save the image.
760  *
761  *      Returns TRUE or FALSE after checking the total amount of swap 
762  *      space available.
763  *
764  *      FIXME: si_swapinfo(&i) returns information about all swap devices.
765  *      We should only consider resume_device. 
766  */
767
768 static int enough_swap(void)
769 {
770         struct sysinfo i;
771
772         si_swapinfo(&i);
773         if (i.freeswap < (nr_copy_pages + PAGES_FOR_IO))  {
774                 pr_debug("swsusp: Not enough swap. Need %ld\n",i.freeswap);
775                 return 0;
776         }
777         return 1;
778 }
779
780 static int swsusp_alloc(void)
781 {
782         int error;
783
784         pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
785                  nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
786
787         pagedir_nosave = NULL;
788         if (!enough_free_mem())
789                 return -ENOMEM;
790
791         if (!enough_swap())
792                 return -ENOSPC;
793
794         nr_copy_pages = calc_nr(nr_copy_pages);
795
796         if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
797                 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
798                 return -ENOMEM;
799         }
800         create_pbe_list(pagedir_save, nr_copy_pages);
801         pagedir_nosave = pagedir_save;
802         if ((error = alloc_image_pages())) {
803                 printk(KERN_ERR "suspend: Allocating image pages failed.\n");
804                 swsusp_free();
805                 return error;
806         }
807
808         nr_copy_pages_check = nr_copy_pages;
809         return 0;
810 }
811
812 static int suspend_prepare_image(void)
813 {
814         int error;
815
816         pr_debug("swsusp: critical section:\n");
817         if (save_highmem()) {
818                 printk(KERN_CRIT "Suspend machine: Not enough free pages for highmem\n");
819                 restore_highmem();
820                 return -ENOMEM;
821         }
822
823         drain_local_pages();
824         count_data_pages();
825         printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
826
827         error = swsusp_alloc();
828         if (error)
829                 return error;
830         
831         /* During allocating of suspend pagedir, new cold pages may appear. 
832          * Kill them.
833          */
834         drain_local_pages();
835         copy_data_pages();
836
837         /*
838          * End of critical section. From now on, we can write to memory,
839          * but we should not touch the disk. This especially means we must _not_
840          * touch swap space! Except we must write out our image of course.
841          */
842
843         printk("swsusp: critical section: done (%d pages copied)\n", nr_copy_pages);
844         return 0;
845 }
846
847
848 /* It is important _NOT_ to umount filesystems at this point. We want
849  * them synced (in case something goes wrong), but we do NOT want to mark
850  * the filesystems clean: they are not. (And it does not matter; if we
851  * resume correctly, we'll mark the system clean anyway.)
852  */
853 int swsusp_write(void)
854 {
855         int error;
856         device_resume();
857         lock_swapdevices();
858         error = write_suspend_image();
859         /* This will unlock ignored swap devices since writing is finished */
860         lock_swapdevices();
861         return error;
862
863 }
864
865
866 extern asmlinkage int swsusp_arch_suspend(void);
867 extern asmlinkage int swsusp_arch_resume(void);
868
869
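/**
 *      swsusp_save - Prepare the suspend image.
 *
 *      Pick the swap device that will hold the image, then build the
 *      in-memory snapshot via suspend_prepare_image().
 */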
870 asmlinkage int swsusp_save(void)
871 {
872         int error = 0;
873
874         if ((error = swsusp_swap_check())) {
875                 printk(KERN_ERR "swsusp: FATAL: cannot find swap device, try "
876                                 "swapon -a!\n");
877                 return error;
878         }
879         return suspend_prepare_image();
880 }
881
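/**
 *      swsusp_suspend - Create the snapshot with interrupts and devices off.
 *
 *      Power the devices down, save the processor state and call the
 *      architecture-specific swsusp_arch_suspend(), which performs the
 *      atomic copy.  Execution resumes here both after a successful
 *      snapshot and after a resume from disk.
 */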
882 int swsusp_suspend(void)
883 {
884         int error;
885         if ((error = arch_prepare_suspend()))
886                 return error;
887         local_irq_disable();
888         /* At this point, device_suspend() has been called, but *not*
889          * device_power_down(). We *must* device_power_down() now.
890          * Otherwise, drivers for some devices (e.g. interrupt controllers)
891          * become desynchronized with the actual state of the hardware
892          * at resume time, and evil weirdness ensues.
893          */
894         if ((error = device_power_down(PMSG_FREEZE))) {
895                 printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
896                 local_irq_enable();
897                 swsusp_free();
898                 return error;
899         }
900         save_processor_state();
901         if ((error = swsusp_arch_suspend()))
902                 swsusp_free();
903         /* Restore control flow magically appears here */
904         restore_processor_state();
905         BUG_ON (nr_copy_pages_check != nr_copy_pages);
906         restore_highmem();
907         device_power_up();
908         local_irq_enable();
909         return error;
910 }
911
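/**
 *      swsusp_resume - Hand control over to the image kernel.
 *
 *      With interrupts disabled and devices powered down, call
 *      swsusp_arch_resume() to copy the image back into place.  On success
 *      this function does not return here; see the comment in the body.
 */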
912 int swsusp_resume(void)
913 {
914         int error;
915         local_irq_disable();
916         if (device_power_down(PMSG_FREEZE))
917                 printk(KERN_ERR "Some devices failed to power down, very bad\n");
918         /* We'll ignore saved state, but this gets preempt count (etc) right */
919         save_processor_state();
920         error = swsusp_arch_resume();
921         /* The code below is only ever reached in case of failure. Otherwise
922          * execution continues at the place where swsusp_arch_suspend() was called.
923          */
924         BUG_ON(!error);
925         restore_processor_state();
926         restore_highmem();
927         device_power_up();
928         local_irq_enable();
929         return error;
930 }
931
932 /* More restore stuff */
933
934 /*
935  * Returns true if the given address/order range collides with any orig_address.
936  */
937 static int does_collide_order(unsigned long addr, int order)
938 {
939         int i;
940         
941         for (i=0; i < (1<<order); i++)
942                 if (!PageNosaveFree(virt_to_page(addr + i * PAGE_SIZE)))
943                         return 1;
944         return 0;
945 }
946
947 /**
948  *      On resume, for storing the PBE list and the image,
949  *      we can only use memory pages that do not conflict with the pages
950  *      that were in use before the suspend.
951  *
952  *      We don't know which pages are usable until we allocate them.
953  *
954  *      Allocated but unusable (i.e. eaten) memory pages are linked together
955  *      into a list, so that we can free them easily.
956  *
957  *      We could have used a type other than (void *)
958  *      for this purpose, but ...
959  */
960 static void **eaten_memory = NULL;
961
962 static inline void eat_page(void *page)
963 {
964         void **c;
965
966         c = eaten_memory;
967         eaten_memory = page;
968         *eaten_memory = c;
969 }
970
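/*
 * Allocate a zeroed page that does not collide with any page of the
 * original image.  Colliding allocations are "eaten" (linked into
 * eaten_memory) and freed later by free_eaten_memory().
 */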
971 static unsigned long get_usable_page(unsigned gfp_mask)
972 {
973         unsigned long m;
974
975         m = get_zeroed_page(gfp_mask);
976         while (does_collide_order(m, 0)) {
977                 eat_page((void *)m);
978                 m = get_zeroed_page(gfp_mask);
979                 if (!m)
980                         break;
981         }
982         return m;
983 }
984
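/*
 * Release all pages that were put on the eaten_memory list because they
 * collided with the image.
 */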
985 static void free_eaten_memory(void)
986 {
987         unsigned long m;
988         void **c;
989         int i = 0;
990
991         c = eaten_memory;
992         while (c) {
993                 m = (unsigned long)c;
994                 c = *c;
995                 free_page(m);
996                 i++;
997         }
998         eaten_memory = NULL;
999         pr_debug("swsusp: %d unused pages freed\n", i);
1000 }
1001
1002 /**
1003  *      check_pagedir - Make sure that the pages the PBEs point to won't
1004  *      collide with the pages into which we are going to restore the
1005  *      loaded image later.
1006  */
1007
1008 static int check_pagedir(struct pbe *pblist)
1009 {
1010         struct pbe *p;
1011
1012         /* This is necessary, so that we can free allocated pages
1013          * in case of failure
1014          */
1015         for_each_pbe (p, pblist)
1016                 p->address = 0UL;
1017
1018         for_each_pbe (p, pblist) {
1019                 p->address = get_usable_page(GFP_ATOMIC);
1020                 if (!p->address)
1021                         return -ENOMEM;
1022         }
1023         return 0;
1024 }
1025
1026 /**
1027  *      swsusp_pagedir_relocate - It is possible that some memory pages
1028  *      occupied by the list of PBEs collide with pages into which we are
1029  *      going to restore the loaded image later.  We relocate them here.
1030  */
1031
1032 static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1033 {
1034         struct zone *zone;
1035         unsigned long zone_pfn;
1036         struct pbe *pbpage, *tail, *p;
1037         void *m;
1038         int rel = 0, error = 0;
1039
1040         if (!pblist) /* a sanity check */
1041                 return NULL;
1042
1043         pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
1044                         swsusp_info.pagedir_pages);
1045
1046         /* Set page flags */
1047
1048         for_each_zone(zone) {
1049                 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
1050                         SetPageNosaveFree(pfn_to_page(zone_pfn +
1051                                         zone->zone_start_pfn));
1052         }
1053
1054         /* Clear orig addresses */
1055
1056         for_each_pbe (p, pblist)
1057                 ClearPageNosaveFree(virt_to_page(p->orig_address));
1058
1059         tail = pblist + PB_PAGE_SKIP;
1060
1061         /* Relocate colliding pages */
1062
1063         for_each_pb_page (pbpage, pblist) {
1064                 if (does_collide_order((unsigned long)pbpage, 0)) {
1065                         m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
1066                         if (!m) {
1067                                 error = -ENOMEM;
1068                                 break;
1069                         }
1070                         memcpy(m, (void *)pbpage, PAGE_SIZE);
1071                         if (pbpage == pblist)
1072                                 pblist = (struct pbe *)m;
1073                         else
1074                                 tail->next = (struct pbe *)m;
1075
1076                         eat_page((void *)pbpage);
1077                         pbpage = (struct pbe *)m;
1078
1079                         /* We have to link the PBEs again */
1080
1081                         for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
1082                                 if (p->next) /* needed to save the end */
1083                                         p->next = p + 1;
1084
1085                         rel++;
1086                 }
1087                 tail = pbpage + PB_PAGE_SKIP;
1088         }
1089
1090         if (error) {
1091                 printk("\nswsusp: Out of memory\n\n");
1092                 free_pagedir(pblist);
1093                 free_eaten_memory();
1094                 pblist = NULL;
1095         }
1096         else
1097                 printk("swsusp: Relocated %d pages\n", rel);
1098
1099         return pblist;
1100 }
1101
1102 /*
1103  *      Using bio to read from swap.
1104  *      This code requires a bit more work than just using buffer heads,
1105  *      but it is the recommended way for 2.5/2.6.
1106  *      The following is used to signal the beginning and end of I/O. Bios
1107  *      finish asynchronously, while we want them to happen synchronously.
1108  *      A simple atomic_t and a wait loop take care of this problem.
1109  */
1110
1111 static atomic_t io_done = ATOMIC_INIT(0);
1112
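/*
 * Completion handler for the synchronous bio requests issued by submit():
 * it clears the io_done flag that submit() is spinning on.
 */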
1113 static int end_io(struct bio * bio, unsigned int num, int err)
1114 {
1115         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1116                 panic("I/O error reading memory image");
1117         atomic_set(&io_done, 0);
1118         return 0;
1119 }
1120
1121 static struct block_device * resume_bdev;
1122
1123 /**
1124  *      submit - submit BIO request.
1125  *      @rw:    READ or WRITE.
1126  *      @page_off:      physical offset of the page.
1127  *      @page:  page we're reading or writing.
1128  *
1129  *      Straight from the textbook - allocate and initialize the bio.
1130  *      If we're writing, make sure the page is marked as dirty.
1131  *      Then submit it and wait.
1132  */
1133
1134 static int submit(int rw, pgoff_t page_off, void * page)
1135 {
1136         int error = 0;
1137         struct bio * bio;
1138
1139         bio = bio_alloc(GFP_ATOMIC, 1);
1140         if (!bio)
1141                 return -ENOMEM;
1142         bio->bi_sector = page_off * (PAGE_SIZE >> 9);
1143         bio_get(bio);
1144         bio->bi_bdev = resume_bdev;
1145         bio->bi_end_io = end_io;
1146
1147         if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) {
1148                 printk("swsusp: ERROR: adding page to bio at %lu\n", page_off);
1149                 error = -EFAULT;
1150                 goto Done;
1151         }
1152
1153         if (rw == WRITE)
1154                 bio_set_pages_dirty(bio);
1155
1156         atomic_set(&io_done, 1);
1157         submit_bio(rw | (1 << BIO_RW_SYNC), bio);
1158         while (atomic_read(&io_done))
1159                 yield();
1160
1161  Done:
1162         bio_put(bio);
1163         return error;
1164 }
1165
1166 static int bio_read_page(pgoff_t page_off, void * page)
1167 {
1168         return submit(READ, page_off, page);
1169 }
1170
1171 static int bio_write_page(pgoff_t page_off, void * page)
1172 {
1173         return submit(WRITE, page_off, page);
1174 }
1175
1176 /*
1177  * Sanity-check whether this image makes sense with this kernel/swap context.
1178  * It is certainly not foolproof, but better than nothing.
1179  */
1180
1181 static const char * sanity_check(void)
1182 {
1183         dump_info();
1184         if (swsusp_info.version_code != LINUX_VERSION_CODE)
1185                 return "kernel version";
1186         if (swsusp_info.num_physpages != num_physpages)
1187                 return "memory size";
1188         if (strcmp(swsusp_info.uts.sysname, system_utsname.sysname))
1189                 return "system type";
1190         if (strcmp(swsusp_info.uts.release, system_utsname.release))
1191                 return "kernel release";
1192         if (strcmp(swsusp_info.uts.version, system_utsname.version))
1193                 return "version";
1194         if (strcmp(swsusp_info.uts.machine, system_utsname.machine))
1195                 return "machine";
1196         if (swsusp_info.cpus != num_online_cpus())
1197                 return "number of cpus";
1198         return NULL;
1199 }
1200
1201
1202 static int check_header(void)
1203 {
1204         const char * reason = NULL;
1205         int error;
1206
1207         if ((error = bio_read_page(swp_offset(swsusp_header.swsusp_info), &swsusp_info)))
1208                 return error;
1209
1210         /* Is this same machine? */
1211         if ((reason = sanity_check())) {
1212                 printk(KERN_ERR "swsusp: Resume mismatch: %s\n",reason);
1213                 return -EPERM;
1214         }
1215         nr_copy_pages = swsusp_info.image_pages;
1216         return error;
1217 }
1218
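/**
 *      check_sig - Look for the swsusp signature in the swap header.
 *
 *      If the signature is found, the original swap signature is restored
 *      and written back right away, so the device no longer advertises a
 *      suspend image.
 */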
1219 static int check_sig(void)
1220 {
1221         int error;
1222
1223         memset(&swsusp_header, 0, sizeof(swsusp_header));
1224         if ((error = bio_read_page(0, &swsusp_header)))
1225                 return error;
1226         if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
1227                 memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
1228
1229                 /*
1230                  * Reset swap signature now.
1231                  */
1232                 error = bio_write_page(0, &swsusp_header);
1233         } else { 
1234                 printk(KERN_ERR "swsusp: Suspend partition has wrong signature?\n");
1235                 return -EINVAL;
1236         }
1237         if (!error)
1238                 pr_debug("swsusp: Signature found, resuming\n");
1239         return error;
1240 }
1241
1242 /**
1243  *      data_read - Read image pages from swap.
1244  *
1245  *      You do not need to check for overlaps, check_pagedir()
1246  *      already did that.
1247  */
1248
1249 static int data_read(struct pbe *pblist)
1250 {
1251         struct pbe * p;
1252         int error = 0;
1253         int i = 0;
1254         int mod = swsusp_info.image_pages / 100;
1255
1256         if (!mod)
1257                 mod = 1;
1258
1259         printk("swsusp: Reading image data (%lu pages):     ",
1260                         swsusp_info.image_pages);
1261
1262         for_each_pbe (p, pblist) {
1263                 if (!(i % mod))
1264                         printk("\b\b\b\b%3d%%", i / mod);
1265
1266                 error = bio_read_page(swp_offset(p->swap_address),
1267                                   (void *)p->address);
1268                 if (error)
1269                         return error;
1270
1271                 i++;
1272         }
1273         printk("\b\b\b\bdone\n");
1274         return error;
1275 }
1276
1277 extern dev_t name_to_dev_t(const char *line);
1278
1279 /**
1280  *      read_pagedir - Read page backup list pages from swap
1281  */
1282
1283 static int read_pagedir(struct pbe *pblist)
1284 {
1285         struct pbe *pbpage, *p;
1286         unsigned i = 0;
1287         int error;
1288
1289         if (!pblist)
1290                 return -EFAULT;
1291
1292         printk("swsusp: Reading pagedir (%lu pages)\n",
1293                         swsusp_info.pagedir_pages);
1294
1295         for_each_pb_page (pbpage, pblist) {
1296                 unsigned long offset = swp_offset(swsusp_info.pagedir[i++]);
1297
1298                 error = -EFAULT;
1299                 if (offset) {
1300                         p = (pbpage + PB_PAGE_SKIP)->next;
1301                         error = bio_read_page(offset, (void *)pbpage);
1302                         (pbpage + PB_PAGE_SKIP)->next = p;
1303                 }
1304                 if (error)
1305                         break;
1306         }
1307
1308         if (error)
1309                 free_page((unsigned long)pblist);
1310
1311         BUG_ON(i != swsusp_info.pagedir_pages);
1312
1313         return error;
1314 }
1315
1316
1317 static int check_suspend_image(void)
1318 {
1319         int error = 0;
1320
1321         if ((error = check_sig()))
1322                 return error;
1323
1324         if ((error = check_header()))
1325                 return error;
1326
1327         return 0;
1328 }
1329
1330 static int read_suspend_image(void)
1331 {
1332         int error = 0;
1333         struct pbe *p;
1334
1335         if (!(p = alloc_pagedir(nr_copy_pages)))
1336                 return -ENOMEM;
1337
1338         if ((error = read_pagedir(p)))
1339                 return error;
1340
1341         create_pbe_list(p, nr_copy_pages);
1342
1343         if (!(pagedir_nosave = swsusp_pagedir_relocate(p)))
1344                 return -ENOMEM;
1345
1346         /* Allocate memory for the image and read the data from swap */
1347
1348         error = check_pagedir(pagedir_nosave);
1349         free_eaten_memory();
1350         if (!error)
1351                 error = data_read(pagedir_nosave);
1352
1353         if (error) { /* We fail cleanly */
1354                 for_each_pbe (p, pagedir_nosave)
1355                         if (p->address) {
1356                                 free_page(p->address);
1357                                 p->address = 0UL;
1358                         }
1359                 free_pagedir(pagedir_nosave);
1360         }
1361         return error;
1362 }
1363
1364 /**
1365  *      swsusp_check - Check for saved image in swap
1366  */
1367
1368 int swsusp_check(void)
1369 {
1370         int error;
1371
1372         if (!swsusp_resume_device) {
1373                 if (!strlen(resume_file))
1374                         return -ENOENT;
1375                 swsusp_resume_device = name_to_dev_t(resume_file);
1376                 pr_debug("swsusp: Resume From Partition %s\n", resume_file);
1377         } else {
1378                 pr_debug("swsusp: Resume From Partition %d:%d\n",
1379                          MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
1380         }
1381
1382         resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
1383         if (!IS_ERR(resume_bdev)) {
1384                 set_blocksize(resume_bdev, PAGE_SIZE);
1385                 error = check_suspend_image();
1386                 if (error)
1387                     blkdev_put(resume_bdev);
1388         } else
1389                 error = PTR_ERR(resume_bdev);
1390
1391         if (!error)
1392                 pr_debug("swsusp: resume file found\n");
1393         else
1394                 pr_debug("swsusp: Error %d checking for resume file\n", error);
1395         return error;
1396 }
1397
1398 /**
1399  *      swsusp_read - Read saved image from swap.
1400  */
1401
1402 int swsusp_read(void)
1403 {
1404         int error;
1405
1406         if (IS_ERR(resume_bdev)) {
1407                 pr_debug("swsusp: block device not initialised\n");
1408                 return PTR_ERR(resume_bdev);
1409         }
1410
1411         error = read_suspend_image();
1412         blkdev_put(resume_bdev);
1413
1414         if (!error)
1415                 pr_debug("swsusp: Reading resume file was successful\n");
1416         else
1417                 pr_debug("swsusp: Error %d resuming\n", error);
1418         return error;
1419 }
1420
1421 /**
1422  *      swsusp_close - close swap device.
1423  */
1424
1425 void swsusp_close(void)
1426 {
1427         if (IS_ERR(resume_bdev)) {
1428                 pr_debug("swsusp: block device not initialised\n");
1429                 return;
1430         }
1431
1432         blkdev_put(resume_bdev);
1433 }