--- linux-4.11.12/include/linux/swap.h.orig
+++ linux-4.11.12/include/linux/swap.h
@@ -413,6 +413,8 @@
 extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
+extern void get_swap_range_of_type(int type, swp_entry_t *start,
+		swp_entry_t *end, unsigned int limit);
 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
 extern void exit_swap_address_space(unsigned int type);
 
--- linux-4.11.12/kernel/power/tuxonice_bio_core.c.orig
+++ linux-4.11.12/kernel/power/tuxonice_bio_core.c
@@ -1724,7 +1724,7 @@
 			O_RDONLY|O_LARGEFILE, 0);
 
 	if (!IS_ERR(file) && file) {
-		vfs_getattr(&file->f_path, &stat);
+		vfs_getattr(&file->f_path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
 		filp_close(file, NULL);
 	} else
 		error = vfs_stat(commandline, &stat);
--- linux-4.11.12/kernel/power/tuxonice_io.c.orig
+++ linux-4.11.12/kernel/power/tuxonice_io.c
@@ -596,7 +596,7 @@
 		goto out;
 	}
 
-	cpumask_copy(orig_mask, tsk_cpus_allowed(current));
+	cpumask_copy(orig_mask, &current->cpus_allowed);
 
 	current->flags |= PF_NOFREEZE;
 
@@ -604,7 +604,7 @@
 	mutex_lock(&io_mutex);
 	thread_num = atomic_read(&toi_io_workers);
 
-	cpumask_copy(tsk_cpus_allowed(current), orig_mask);
+	cpumask_copy(&current->cpus_allowed, orig_mask);
 
 	schedule();
 	atomic_inc(&toi_io_workers);
--- linux-4.11.12/kernel/power/tuxonice_prepare_image.c.orig
+++ linux-4.11.12/kernel/power/tuxonice_prepare_image.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
--- linux-4.11.12/mm/swapfile.c.orig
+++ linux-4.11.12/mm/swapfile.c
@@ -879,6 +879,60 @@
 	return (swp_entry_t) {0};
 }
 
+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+				       unsigned int prev, bool frontswap);
+
+void get_swap_range_of_type(int type, swp_entry_t *start, swp_entry_t *end,
+		unsigned int limit)
+{
+	struct swap_info_struct *si;
+	pgoff_t start_at;
+	unsigned int i;
+
+	*start = swp_entry(0, 0);
+	*end = swp_entry(0, 0);
+	si = swap_info[type];
+	spin_lock(&si->lock);
+	if (si && (si->flags & SWP_WRITEOK)) {
+		atomic_long_dec(&nr_swap_pages);
+		/* This is called for allocating swap entry, not cache */
+		start_at = scan_swap_map(si, 1);
+		if (start_at) {
+			unsigned long stop_at = find_next_to_unuse(si, start_at, 0);
+			if (stop_at > start_at)
+				stop_at--;
+			else
+				stop_at = si->max - 1;
+			if (stop_at - start_at + 1 > limit)
+				stop_at = min_t(unsigned int,
+						start_at + limit - 1,
+						si->max - 1);
+			/* Mark them used */
+			for (i = start_at; i <= stop_at; i++)
+				si->swap_map[i] = 1;
+			/* first page already done above */
+			si->inuse_pages += stop_at - start_at;
+
+			atomic_long_sub(stop_at - start_at, &nr_swap_pages);
+			if (start_at == si->lowest_bit)
+				si->lowest_bit = stop_at + 1;
+			if (stop_at == si->highest_bit)
+				si->highest_bit = start_at - 1;
+			if (si->inuse_pages == si->pages) {
+				si->lowest_bit = si->max;
+				si->highest_bit = 0;
+			}
+			for (i = start_at + 1; i <= stop_at; i++)
+				inc_cluster_info_page(si, si->cluster_info, i);
+			si->cluster_next = stop_at + 1;
+			*start = swp_entry(type, start_at);
+			*end = swp_entry(type, stop_at);
+		} else
+			atomic_long_inc(&nr_swap_pages);
+	}
+	spin_unlock(&si->lock);
+}
+
 static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
 {
 	struct swap_info_struct *p;
--- linux-4.11.12/mm/vmscan.c.orig
+++ linux-4.11.12/mm/vmscan.c
@@ -2819,6 +2819,12 @@
 	pg_data_t *last_pgdat;
 	struct zoneref *z;
 	struct zone *zone;
+
+#ifdef CONFIG_FREEZER
+	if (unlikely(pm_freezing && !sc->hibernation_mode))
+		return 0;
+#endif
+
 retry:
 	delayacct_freepages_start();
 
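
For reference, a minimal caller sketch of the get_swap_range_of_type() interface exported by the swap.h/swapfile.c hunks above: it reserves up to 'limit' contiguous swap entries on one swap device and checks whether the call succeeded. The enclosing function name and the pr_debug reporting are illustrative assumptions, not part of the patch; only get_swap_range_of_type(), swp_entry()/swp_offset() and the listed headers come from the kernel tree.

/*
 * Illustrative sketch only (not part of the patch above): ask the swap
 * allocator for a contiguous run of entries on device 'type'.  The
 * function name and the pr_debug call are assumptions for the example.
 */
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int example_reserve_swap_run(int type, unsigned int limit)
{
	swp_entry_t start, end;

	/* On success both entries carry 'type' and a non-zero offset. */
	get_swap_range_of_type(type, &start, &end, limit);
	if (!swp_offset(start))
		return -ENOSPC;		/* device full or not writable */

	pr_debug("reserved swap offsets %lu..%lu on device %d\n",
		 swp_offset(start), swp_offset(end), type);
	return 0;
}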