Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-23 09:56:00 +00:00)
Merge branch 'pm-sleep'
* pm-sleep:
  PM / Hibernate: Touch Soft Lockup Watchdog in rtree_next_node
  PM / Hibernate: Remove the old memory-bitmap implementation
  PM / Hibernate: Iterate over set bits instead of PFNs in swsusp_free()
  PM / Hibernate: Implement position keeping in radix tree
  PM / Hibernate: Add memory_rtree_find_bit function
  PM / Hibernate: Create a Radix-Tree to store memory bitmap
  PM / sleep: fix kernel-doc warnings in drivers/base/power/main.c
commit ddbe8db147
drivers/base/power/main.c
@@ -465,6 +465,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * device_resume_noirq - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
@@ -594,6 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
  * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
@@ -1004,6 +1006,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * device_suspend_noirq - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
@@ -1144,6 +1147,7 @@ static int dpm_suspend_noirq(pm_message_t state)
  * device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
@@ -1298,6 +1302,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
  * @dev: Device to suspend.
  * @state: PM transition of the system being carried out.
  * @cb: Suspend callback to execute.
+ * @info: string description of caller.
  */
 static int legacy_suspend(struct device *dev, pm_message_t state,
 			  int (*cb)(struct device *dev, pm_message_t state),
kernel/power/snapshot.c
@@ -248,33 +248,61 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  *	information is stored (in the form of a block of bitmap)
  *	It also contains the pfns that correspond to the start and end of
  *	the represented memory area.
+ *
+ *	The memory bitmap is organized as a radix tree to guarantee fast random
+ *	access to the bits. There is one radix tree for each zone (as returned
+ *	from create_mem_extents).
+ *
+ *	One radix tree is represented by one struct mem_zone_bm_rtree. There are
+ *	two linked lists for the nodes of the tree, one for the inner nodes and
+ *	one for the leave nodes. The linked leave nodes are used for fast linear
+ *	access of the memory bitmap.
+ *
+ *	The struct rtree_node represents one node of the radix tree.
  */
 
 #define BM_END_OF_MAP	(~0UL)
 
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
+#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
+#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
 
-struct bm_block {
-	struct list_head hook;		/* hook into a list of bitmap blocks */
-	unsigned long start_pfn;	/* pfn represented by the first bit */
-	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
-	unsigned long *data;		/* bitmap representing pages */
+/*
+ *	struct rtree_node is a wrapper struct to link the nodes
+ *	of the rtree together for easy linear iteration over
+ *	bits and easy freeing
+ */
+struct rtree_node {
+	struct list_head list;
+	unsigned long *data;
 };
 
-static inline unsigned long bm_block_bits(struct bm_block *bb)
-{
-	return bb->end_pfn - bb->start_pfn;
-}
+/*
+ *	struct mem_zone_bm_rtree represents a bitmap used for one
+ *	populated memory zone.
+ */
+struct mem_zone_bm_rtree {
+	struct list_head list;		/* Link Zones together         */
+	struct list_head nodes;		/* Radix Tree inner nodes      */
+	struct list_head leaves;	/* Radix Tree leaves           */
+	unsigned long start_pfn;	/* Zone start page frame       */
+	unsigned long end_pfn;		/* Zone end page frame + 1     */
+	struct rtree_node *rtree;	/* Radix Tree Root             */
+	int levels;			/* Number of Radix Tree Levels */
+	unsigned int blocks;		/* Number of Bitmap Blocks     */
+};
 
 /* strcut bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct bm_block *block;
-	int bit;
+	struct mem_zone_bm_rtree *zone;
+	struct rtree_node *node;
+	unsigned long node_pfn;
+	int node_bit;
 };
 
 struct memory_bitmap {
-	struct list_head blocks;	/* list of bitmap blocks */
+	struct list_head zones;
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
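A note on the geometry fixed by BM_BLOCK_SHIFT/BM_BLOCK_MASK above: one radix-tree leaf page covers BM_BITS_PER_BLOCK page frames, so a zone-relative pfn splits into a leaf-block number and a bit offset within that leaf. A minimal userspace sketch of that arithmetic (PAGE_SHIFT and the sample values below are assumptions for illustration, not taken from the patch):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define BM_BLOCK_SHIFT	(PAGE_SHIFT + 3)	/* one page holds PAGE_SIZE * 8 bits */
#define BM_BLOCK_MASK	((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
	unsigned long zone_start_pfn = 0x1000;	/* hypothetical zone base */
	unsigned long pfn = 0x1a2b3;

	/* which leaf page of the radix tree holds the bit for this pfn? */
	unsigned long block_nr = (pfn - zone_start_pfn) >> BM_BLOCK_SHIFT;
	/* and which bit inside that leaf page? */
	unsigned long bit_nr = (pfn - zone_start_pfn) & BM_BLOCK_MASK;

	printf("pfn %#lx -> leaf block %lu, bit %lu\n", pfn, block_nr, bit_nr);
	return 0;
}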
@@ -284,38 +312,178 @@ struct memory_bitmap {
 
 /* Functions that operate on memory bitmaps */
 
-static void memory_bm_position_reset(struct memory_bitmap *bm)
+#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
+#if BITS_PER_LONG == 32
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
+#else
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
+#endif
+#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
+
+/*
+ *	alloc_rtree_node - Allocate a new node and add it to the radix tree.
+ *
+ *	This function is used to allocate inner nodes as well as the
+ *	leave nodes of the radix tree. It also adds the node to the
+ *	corresponding linked list passed in by the *list parameter.
+ */
+static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
+					   struct chain_allocator *ca,
+					   struct list_head *list)
 {
-	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
-	bm->cur.bit = 0;
+	struct rtree_node *node;
+
+	node = chain_alloc(ca, sizeof(struct rtree_node));
+	if (!node)
+		return NULL;
+
+	node->data = get_image_page(gfp_mask, safe_needed);
+	if (!node->data)
+		return NULL;
+
+	list_add_tail(&node->list, list);
+
+	return node;
 }
 
-static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
-
-/**
- *	create_bm_block_list - create a list of block bitmap objects
- *	@pages - number of pages to track
- *	@list - list to put the allocated blocks into
- *	@ca - chain allocator to be used for allocating memory
+/*
+ *	add_rtree_block - Add a new leave node to the radix tree
+ *
+ *	The leave nodes need to be allocated in order to keep the leaves
+ *	linked list in order. This is guaranteed by the zone->blocks
+ *	counter.
  */
-static int create_bm_block_list(unsigned long pages,
-				struct list_head *list,
-				struct chain_allocator *ca)
+static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
+			   int safe_needed, struct chain_allocator *ca)
 {
-	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+	struct rtree_node *node, *block, **dst;
+	unsigned int levels_needed, block_nr;
+	int i;
 
-	while (nr_blocks-- > 0) {
-		struct bm_block *bb;
+	block_nr = zone->blocks;
+	levels_needed = 0;
 
-		bb = chain_alloc(ca, sizeof(struct bm_block));
-		if (!bb)
-			return -ENOMEM;
-		list_add(&bb->hook, list);
+	/* How many levels do we need for this block nr? */
+	while (block_nr) {
+		levels_needed += 1;
+		block_nr >>= BM_RTREE_LEVEL_SHIFT;
+	}
+
+	/* Make sure the rtree has enough levels */
+	for (i = zone->levels; i < levels_needed; i++) {
+		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+					&zone->nodes);
+		if (!node)
+			return -ENOMEM;
+
+		node->data[0] = (unsigned long)zone->rtree;
+		zone->rtree = node;
+		zone->levels += 1;
+	}
+
+	/* Allocate new block */
+	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
+	if (!block)
+		return -ENOMEM;
+
+	/* Now walk the rtree to insert the block */
+	node = zone->rtree;
+	dst = &zone->rtree;
+	block_nr = zone->blocks;
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		if (!node) {
+			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+						&zone->nodes);
+			if (!node)
+				return -ENOMEM;
+			*dst = node;
+		}
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		dst = (struct rtree_node **)&((*dst)->data[index]);
+		node = *dst;
 	}
 
+	zone->blocks += 1;
+	*dst = block;
+
 	return 0;
 }
 
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free);
+
+/*
+ *	create_zone_bm_rtree - create a radix tree for one zone
+ *
+ *	Allocated the mem_zone_bm_rtree structure and initializes it.
+ *	This function also allocated and builds the radix tree for the
+ *	zone.
+ */
+static struct mem_zone_bm_rtree *
+create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
+		     struct chain_allocator *ca,
+		     unsigned long start, unsigned long end)
+{
+	struct mem_zone_bm_rtree *zone;
+	unsigned int i, nr_blocks;
+	unsigned long pages;
+
+	pages = end - start;
+	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
+	if (!zone)
+		return NULL;
+
+	INIT_LIST_HEAD(&zone->nodes);
+	INIT_LIST_HEAD(&zone->leaves);
+	zone->start_pfn = start;
+	zone->end_pfn = end;
+	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+
+	for (i = 0; i < nr_blocks; i++) {
+		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
+			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
+			return NULL;
+		}
+	}
+
+	return zone;
+}
+
+/*
+ *	free_zone_bm_rtree - Free the memory of the radix tree
+ *
+ *	Free all node pages of the radix tree. The mem_zone_bm_rtree
+ *	structure itself is not freed here nor are the rtree_node
+ *	structs.
+ */
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		free_image_page(node->data, clear_nosave_free);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		free_image_page(node->data, clear_nosave_free);
+}
+
+static void memory_bm_position_reset(struct memory_bitmap *bm)
+{
+	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
+				  list);
+	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+				  struct rtree_node, list);
+	bm->cur.node_pfn = 0;
+	bm->cur.node_bit = 0;
+}
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+
 struct mem_extent {
 	struct list_head hook;
 	unsigned long start;
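add_rtree_block sizes the tree from zone->blocks: the number of levels is how many BM_RTREE_LEVEL_SHIFT-wide digits the new block number has, and the insert walk then consumes one such digit per level as an index. A standalone sketch of that arithmetic, assuming 64-bit longs and 4 KiB pages so each inner node holds 512 slots (the block number is made up):

#include <stdio.h>

#define BM_RTREE_LEVEL_SHIFT	9	/* assumption: PAGE_SIZE / sizeof(long) == 512 */
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

int main(void)
{
	unsigned long block_nr = 1234567;
	unsigned int levels_needed = 0, n = block_nr;
	int i;

	/* same loop as add_rtree_block: count shift steps until zero */
	while (n) {
		levels_needed += 1;
		n >>= BM_RTREE_LEVEL_SHIFT;
	}
	printf("block %lu needs %u level(s)\n", block_nr, levels_needed);

	/* walk from the root: one 9-bit index slice per level */
	for (i = levels_needed; i > 0; i--) {
		unsigned long index =
			(block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT)) &
			BM_RTREE_LEVEL_MASK;
		printf("level %d: index %lu\n", i, index);
	}
	return 0;
}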
@@ -407,40 +575,22 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 
 	error = create_mem_extents(&mem_extents, gfp_mask);
 	if (error)
 		return error;
 
 	list_for_each_entry(ext, &mem_extents, hook) {
-		struct bm_block *bb;
-		unsigned long pfn = ext->start;
-		unsigned long pages = ext->end - ext->start;
+		struct mem_zone_bm_rtree *zone;
 
-		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
-
-		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
-		if (error)
+		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
+					    ext->start, ext->end);
+		if (!zone) {
+			error = -ENOMEM;
 			goto Error;
-
-		list_for_each_entry_continue(bb, &bm->blocks, hook) {
-			bb->data = get_image_page(gfp_mask, safe_needed);
-			if (!bb->data) {
-				error = -ENOMEM;
-				goto Error;
-			}
-
-			bb->start_pfn = pfn;
-			if (pages >= BM_BITS_PER_BLOCK) {
-				pfn += BM_BITS_PER_BLOCK;
-				pages -= BM_BITS_PER_BLOCK;
-			} else {
-				/* This is executed only once in the loop */
-				pfn += pages;
-			}
-			bb->end_pfn = pfn;
 		}
+		list_add_tail(&zone->list, &bm->zones);
 	}
 
 	bm->p_list = ca.chain;
@@ -460,51 +610,83 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
  */
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *zone;
 
-	list_for_each_entry(bb, &bm->blocks, hook)
-		if (bb->data)
-			free_image_page(bb->data, clear_nosave_free);
+	list_for_each_entry(zone, &bm->zones, list)
+		free_zone_bm_rtree(zone, clear_nosave_free);
 
 	free_list_of_pages(bm->p_list, clear_nosave_free);
 
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 }
 
 /**
- *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
- *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
- *	of @bm->cur_zone_bm are updated.
+ *	memory_bm_find_bit - Find the bit for pfn in the memory
+ *	bitmap
+ *
+ *	Find the bit in the bitmap @bm that corresponds to given pfn.
+ *	The cur.zone, cur.block and cur.node_pfn member of @bm are
+ *	updated.
+ *	It walks the radix tree to find the page which contains the bit for
+ *	pfn and returns the bit position in **addr and *bit_nr.
  */
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
-				void **addr, unsigned int *bit_nr)
+			      void **addr, unsigned int *bit_nr)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *curr, *zone;
+	struct rtree_node *node;
+	int i, block_nr;
 
-	/*
-	 * Check if the pfn corresponds to the current bitmap block and find
-	 * the block where it fits if this is not the case.
-	 */
-	bb = bm->cur.block;
-	if (pfn < bb->start_pfn)
-		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn)
-				break;
+	zone = bm->cur.zone;
 
-	if (pfn >= bb->end_pfn)
-		list_for_each_entry_continue(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
-				break;
+	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
+		goto zone_found;
 
-	if (&bb->hook == &bm->blocks)
-		return -EFAULT;
+	zone = NULL;
 
-	/* The block has been found */
-	bm->cur.block = bb;
-	pfn -= bb->start_pfn;
-	bm->cur.bit = pfn + 1;
-	*bit_nr = pfn;
-	*addr = bb->data;
+	/* Find the right zone */
+	list_for_each_entry(curr, &bm->zones, list) {
+		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
+			zone = curr;
+			break;
+		}
+	}
+
+	if (!zone)
+		return -EFAULT;
+
+zone_found:
+	/*
+	 * We have a zone. Now walk the radix tree to find the leave
+	 * node for our pfn.
+	 */
+	node = bm->cur.node;
+	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+		goto node_found;
+
+	node = zone->rtree;
+	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
+
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		BUG_ON(node->data[index] == 0);
+		node = (struct rtree_node *)node->data[index];
+	}
+
+node_found:
+	/* Update last position */
+	bm->cur.zone = zone;
+	bm->cur.node = node;
+	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+
+	/* Set return values */
+	*addr = node->data;
+	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
+
 	return 0;
 }
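The radix-tree walk in memory_bm_find_bit is skipped entirely when the pfn lands in the cached leaf: the zone-relative pfn rounded down to its block start must equal cur.node_pfn. A self-contained sketch of just that fast-path test (the constants and sample values are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define BM_BLOCK_SHIFT	15	/* assumption: PAGE_SHIFT 12 + 3 */
#define BM_BLOCK_MASK	((1UL << BM_BLOCK_SHIFT) - 1)

static bool hits_cached_node(unsigned long pfn, unsigned long zone_start_pfn,
			     unsigned long cur_node_pfn)
{
	/* round the zone-relative pfn down to its leaf block's first pfn */
	return ((pfn - zone_start_pfn) & ~BM_BLOCK_MASK) == cur_node_pfn;
}

int main(void)
{
	unsigned long zone_start = 0x1000;
	unsigned long cached = 3UL << BM_BLOCK_SHIFT;	/* cached leaf covers block 3 */

	printf("%d\n", hits_cached_node(0x1a2b3, zone_start, cached));	/* 1 */
	printf("%d\n", hits_cached_node(0x22b3, zone_start, cached));	/* 0 */
	return 0;
}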
@@ -528,6 +710,7 @@ static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	if (!error)
 		set_bit(bit, addr);
+
 	return error;
 }
@@ -542,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+	int bit;
+
+	bit = max(bm->cur.node_bit - 1, 0);
+	clear_bit(bit, bm->cur.node->data);
+}
+
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
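memory_bm_clear_current leans on the position keeping: memory_bm_next_pfn advances cur.node_bit one past the bit it just returned, so the "current" bit is node_bit - 1, clamped to 0. A toy demonstration of the same bookkeeping on a single bitmap word (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long map = 0x28;	/* bits 3 and 5 set */
	int node_bit = 0, bit;

	/* find the first set bit at or after node_bit (mimics find_next_bit) */
	for (bit = node_bit; bit < 64; bit++)
		if (map & (1UL << bit))
			break;
	node_bit = bit + 1;		/* position keeping, as in next_pfn */

	/* clear the bit that was just returned: max(node_bit - 1, 0) */
	bit = node_bit - 1 > 0 ? node_bit - 1 : 0;
	map &= ~(1UL << bit);

	printf("map now %#lx\n", map);	/* 0x20: bit 3 cleared, bit 5 kept */
	return 0;
}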
@@ -561,38 +752,70 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 }
 
-/**
- *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
- *	in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
- *	returned.
+/*
+ *	rtree_next_node - Jumps to the next leave node
  *
- *	It is required to run memory_bm_position_reset() before the first call to
- *	this function.
+ *	Sets the position to the beginning of the next node in the
+ *	memory bitmap. This is either the next node in the current
+ *	zone's radix tree or the first node in the radix tree of the
+ *	next zone.
+ *
+ *	Returns true if there is a next node, false otherwise.
  */
+static bool rtree_next_node(struct memory_bitmap *bm)
+{
+	bm->cur.node = list_entry(bm->cur.node->list.next,
+				  struct rtree_node, list);
+	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
+		bm->cur.node_bit = 0;
+		touch_softlockup_watchdog();
+		return true;
+	}
+
+	/* No more nodes, goto next zone */
+	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+				  struct mem_zone_bm_rtree, list);
+	if (&bm->cur.zone->list != &bm->zones) {
+		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+					  struct rtree_node, list);
+		bm->cur.node_pfn = 0;
+		bm->cur.node_bit = 0;
+		return true;
+	}
+
+	/* No more zones */
+	return false;
+}
+
+/**
+ *	memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
+ *
+ *	Starting from the last returned position this function searches
+ *	for the next set bit in the memory bitmap and returns its
+ *	number. If no more bit is set BM_END_OF_MAP is returned.
+ *
+ *	It is required to run memory_bm_position_reset() before the
+ *	first call to this function.
+ */
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct bm_block *bb;
+	unsigned long bits, pfn, pages;
 	int bit;
 
-	bb = bm->cur.block;
 	do {
-		bit = bm->cur.bit;
-		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-		if (bit < bm_block_bits(bb))
-			goto Return_pfn;
-
-		bb = list_entry(bb->hook.next, struct bm_block, hook);
-		bm->cur.block = bb;
-		bm->cur.bit = 0;
-	} while (&bb->hook != &bm->blocks);
+		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
+		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
+		bit = find_next_bit(bm->cur.node->data, bits,
+				    bm->cur.node_bit);
+		if (bit < bits) {
+			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
+			bm->cur.node_bit = bit + 1;
+			return pfn;
+		}
+	} while (rtree_next_node(bm));
 
-	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
-
- Return_pfn:
-	bm->cur.bit = bit + 1;
-	return bb->start_pfn + bit;
 }
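Taken together, rtree_next_node and memory_bm_next_pfn give consumers a simple protocol: reset the position once, then pull pfns until BM_END_OF_MAP. A userspace mock of that protocol, with a flat array of 64-bit words standing in for the radix-tree leaves (the names and layout here are illustrative, not the kernel API):

#include <stdio.h>

#define BM_END_OF_MAP	(~0UL)
#define NR_LEAVES	3
#define BITS_PER_LEAF	64

static unsigned long leaves[NR_LEAVES] = { 0x11, 0, 0x8000000000000001UL };
static unsigned int cur_leaf, cur_bit;	/* the kept position */

static void position_reset(void)
{
	cur_leaf = 0;
	cur_bit = 0;
}

static unsigned long next_pfn(void)
{
	while (cur_leaf < NR_LEAVES) {
		unsigned int bit;

		for (bit = cur_bit; bit < BITS_PER_LEAF; bit++) {
			if (leaves[cur_leaf] & (1UL << bit)) {
				cur_bit = bit + 1;	/* keep position */
				return cur_leaf * BITS_PER_LEAF + bit;
			}
		}
		cur_leaf++;		/* rtree_next_node analogue */
		cur_bit = 0;
	}
	return BM_END_OF_MAP;
}

int main(void)
{
	unsigned long pfn;

	position_reset();	/* required before the first next_pfn call */
	while ((pfn = next_pfn()) != BM_END_OF_MAP)
		printf("set pfn: %lu\n", pfn);
	return 0;
}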
/**
|
||||
@ -816,12 +1039,17 @@ void free_basic_memory_bitmaps(void)
|
||||
|
||||
unsigned int snapshot_additional_pages(struct zone *zone)
|
||||
{
|
||||
unsigned int res;
|
||||
unsigned int rtree, nodes;
|
||||
|
||||
res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
|
||||
res += DIV_ROUND_UP(res * sizeof(struct bm_block),
|
||||
LINKED_PAGE_DATA_SIZE);
|
||||
return 2 * res;
|
||||
rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
|
||||
rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
|
||||
LINKED_PAGE_DATA_SIZE);
|
||||
while (nodes > 1) {
|
||||
nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
|
||||
rtree += nodes;
|
||||
}
|
||||
|
||||
return 2 * rtree;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
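The new estimate counts leaf pages, the linked-list bookkeeping for them, and a geometric tail of inner nodes (each level is roughly 1/512th of the one below on 64-bit). A worked example with assumed sizes; the struct size and LINKED_PAGE_DATA_SIZE below are rough guesses, only the shape of the computation matches the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long spanned_pages = 0x100000;		/* assumption: ~4 GiB zone */
	unsigned long bm_bits_per_block = 32768;	/* 4 KiB page * 8 bits */
	unsigned long entries_per_level = 512;		/* 4096 / sizeof(long) */
	unsigned long linked_page_data_size = 4096 - 16; /* rough guess */
	unsigned long sizeof_rtree_node = 24;		/* rough guess */
	unsigned long rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(spanned_pages, bm_bits_per_block);	/* 32 leaves */
	rtree += DIV_ROUND_UP(rtree * sizeof_rtree_node, linked_page_data_size);
	while (nodes > 1) {		/* inner levels: geometric tail */
		nodes = DIV_ROUND_UP(nodes, entries_per_level);
		rtree += nodes;
	}
	/* prints 34 pages per bitmap, 68 for both bitmaps */
	printf("pages per bitmap: %lu, for both bitmaps: %lu\n", rtree, 2 * rtree);
	return 0;
}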
@@ -1094,23 +1322,35 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long fb_pfn, fr_pfn;
 
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
+	memory_bm_position_reset(forbidden_pages_map);
+	memory_bm_position_reset(free_pages_map);
 
-				if (swsusp_page_is_forbidden(page) &&
-				    swsusp_page_is_free(page)) {
-					swsusp_unset_page_forbidden(page);
-					swsusp_unset_page_free(page);
-					__free_page(page);
-				}
-			}
+loop:
+	fr_pfn = memory_bm_next_pfn(free_pages_map);
+	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+
+	/*
+	 * Find the next bit set in both bitmaps. This is guaranteed to
+	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+	 */
+	do {
+		if (fb_pfn < fr_pfn)
+			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+		if (fr_pfn < fb_pfn)
+			fr_pfn = memory_bm_next_pfn(free_pages_map);
+	} while (fb_pfn != fr_pfn);
+
+	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+		struct page *page = pfn_to_page(fr_pfn);
+
+		memory_bm_clear_current(forbidden_pages_map);
+		memory_bm_clear_current(free_pages_map);
+		__free_page(page);
+		goto loop;
 	}
+
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
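The rewritten swsusp_free is a classic sorted-sequence intersection: both bitmaps yield ascending pfns, the lagging side is advanced until the two agree, and equality at BM_END_OF_MAP ends the scan. The same idea over two plain arrays (illustrative, not kernel code):

#include <stdio.h>

#define END	(~0UL)

int main(void)
{
	unsigned long forbidden[] = { 3, 7, 9, 12, END };
	unsigned long free_[]     = { 2, 7, 12, 15, END };
	unsigned int fb = 0, fr = 0;
	unsigned long fb_pfn = forbidden[fb++], fr_pfn = free_[fr++];

	for (;;) {
		/* advance the lagging stream; END == END terminates */
		while (fb_pfn != fr_pfn) {
			if (fb_pfn < fr_pfn)
				fb_pfn = forbidden[fb++];
			if (fr_pfn < fb_pfn)
				fr_pfn = free_[fr++];
		}
		if (fr_pfn == END)
			break;
		printf("page %lu is in both bitmaps\n", fr_pfn);	/* 7, then 12 */
		fb_pfn = forbidden[fb++];
		fr_pfn = free_[fr++];
	}
	return 0;
}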