xen: move max_pfn in xen_memory_setup() out of function scope
[ Upstream commit 43dc2a0f479b9cd30f6674986d7a40517e999d31 ]

Instead of having max_pfn as a local variable of xen_memory_setup(), make it a static variable in setup.c. This avoids having to pass it to subfunctions, which will be needed in more cases in the future.

Rename it to ini_nr_pages, as the value denotes the currently usable number of memory pages passed from the hypervisor at boot time.

Signed-off-by: Juergen Gross <jgross@suse.com>
Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Stable-dep-of: be35d91c8880 ("xen: tolerate ACPI NVS memory overlapping with Xen allocated memory")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 242d0c3c40
commit f12153eece

1 changed file with 26 additions and 26 deletions
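The change is easiest to read as a refactoring pattern before diving into the diff. Below is a minimal, self-contained sketch (plain userspace C, not the kernel code; the helper name count_usable and the page numbers are invented for illustration): a value that used to be computed into a local and threaded through every helper as an nr_pages parameter is instead stored once in a file-scope static, so the helpers read it directly and the extra parameter disappears.

#include <stdio.h>

/* Counterpart of "static unsigned long ini_nr_pages __initdata;" in setup.c. */
static unsigned long ini_nr_pages;

/*
 * Before the change this helper would also have taken an
 * "unsigned long nr_pages" parameter; now it reads the static directly.
 */
static unsigned long count_usable(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long end = end_pfn < ini_nr_pages ? end_pfn : ini_nr_pages;

        return start_pfn < end ? end - start_pfn : 0;
}

int main(void)
{
        /* Assigned exactly once at "boot", like in xen_memory_setup(). */
        ini_nr_pages = 1024;

        printf("%lu\n", count_usable(512, 2048));       /* prints 512 */
        return 0;
}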
arch/x86/xen/setup.c

@@ -47,6 +47,9 @@ bool xen_pv_pci_possible;
 /* E820 map used during setting up memory. */
 static struct e820_table xen_e820_table __initdata;
 
+/* Number of initially usable memory pages. */
+static unsigned long ini_nr_pages __initdata;
+
 /*
  * Buffer used to remap identity mapped pages. We only need the virtual space.
  * The physical page behind this address is remapped as needed to different
@@ -213,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-                        unsigned long end_pfn, unsigned long nr_pages)
+                                                       unsigned long end_pfn)
 {
         unsigned long pfn, end;
         int ret;
@@ -221,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
         WARN_ON(start_pfn > end_pfn);
 
         /* Release pages first. */
-        end = min(end_pfn, nr_pages);
+        end = min(end_pfn, ini_nr_pages);
         for (pfn = start_pfn; pfn < end; pfn++) {
                 unsigned long mfn = pfn_to_mfn(pfn);
 
@@ -342,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
  * to Xen and not remapped.
  */
 static unsigned long __init xen_set_identity_and_remap_chunk(
-        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-        unsigned long remap_pfn)
+        unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
 {
         unsigned long pfn;
         unsigned long i = 0;
         unsigned long n = end_pfn - start_pfn;
 
         if (remap_pfn == 0)
-                remap_pfn = nr_pages;
+                remap_pfn = ini_nr_pages;
 
         while (i < n) {
                 unsigned long cur_pfn = start_pfn + i;
@@ -359,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                 unsigned long remap_range_size;
 
                 /* Do not remap pages beyond the current allocation */
-                if (cur_pfn >= nr_pages) {
+                if (cur_pfn >= ini_nr_pages) {
                         /* Identity map remaining pages */
                         set_phys_range_identity(cur_pfn, cur_pfn + size);
                         break;
                 }
-                if (cur_pfn + size > nr_pages)
-                        size = nr_pages - cur_pfn;
+                if (cur_pfn + size > ini_nr_pages)
+                        size = ini_nr_pages - cur_pfn;
 
                 remap_range_size = xen_find_pfn_range(&remap_pfn);
                 if (!remap_range_size) {
                         pr_warn("Unable to find available pfn range, not remapping identity pages\n");
                         xen_set_identity_and_release_chunk(cur_pfn,
-                                                cur_pfn + left, nr_pages);
+                                                           cur_pfn + left);
                         break;
                 }
                 /* Adjust size to fit in current e820 RAM region */
@@ -398,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 }
 
 static unsigned long __init xen_count_remap_pages(
-        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+        unsigned long start_pfn, unsigned long end_pfn,
         unsigned long remap_pages)
 {
-        if (start_pfn >= nr_pages)
+        if (start_pfn >= ini_nr_pages)
                 return remap_pages;
 
-        return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+        return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
 }
 
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+static unsigned long __init xen_foreach_remap_area(
         unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
-                              unsigned long nr_pages, unsigned long last_val))
+                              unsigned long last_val))
 {
         phys_addr_t start = 0;
         unsigned long ret_val = 0;
@@ -437,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
                                 end_pfn = PFN_UP(entry->addr);
 
                         if (start_pfn < end_pfn)
-                                ret_val = func(start_pfn, end_pfn, nr_pages,
-                                               ret_val);
+                                ret_val = func(start_pfn, end_pfn, ret_val);
                         start = end;
                 }
         }
@@ -701,7 +702,7 @@ static void __init xen_reserve_xen_mfnlist(void)
  **/
 char * __init xen_memory_setup(void)
 {
-        unsigned long max_pfn, pfn_s, n_pfns;
+        unsigned long pfn_s, n_pfns;
         phys_addr_t mem_end, addr, size, chunk_size;
         u32 type;
         int rc;
@@ -713,9 +714,8 @@ char * __init xen_memory_setup(void)
         int op;
 
         xen_parse_512gb();
-        max_pfn = xen_get_pages_limit();
-        max_pfn = min(max_pfn, xen_start_info->nr_pages);
-        mem_end = PFN_PHYS(max_pfn);
+        ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
+        mem_end = PFN_PHYS(ini_nr_pages);
 
         memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
         set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
@@ -768,10 +768,10 @@ char * __init xen_memory_setup(void)
         max_pages = xen_get_max_pages();
 
         /* How many extra pages do we need due to remapping? */
-        max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+        max_pages += xen_foreach_remap_area(xen_count_remap_pages);
 
-        if (max_pages > max_pfn)
-                extra_pages += max_pages - max_pfn;
+        if (max_pages > ini_nr_pages)
+                extra_pages += max_pages - ini_nr_pages;
 
         /*
          * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -780,8 +780,8 @@ char * __init xen_memory_setup(void)
          * Make sure we have no memory above max_pages, as this area
          * isn't handled by the p2m management.
          */
-        maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM));
-        extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn);
+        maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
+        extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
         i = 0;
         addr = xen_e820_table.entries[0].addr;
         size = xen_e820_table.entries[0].size;
@@ -886,7 +886,7 @@ char * __init xen_memory_setup(void)
          * Set identity map on non-RAM pages and prepare remapping the
          * underlying RAM.
          */
-        xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+        xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
 
         pr_info("Released %ld page(s)\n", xen_released_pages);
 
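As a recap of the calling convention after the change: xen_foreach_remap_area() now takes only the callback, since the page count is no longer threaded through as a parameter. The sketch below is illustrative only; the region array and the names foreach_remap_area/sum_cb are invented stand-ins for the real E820 walk in setup.c.

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

/* New-style callback: (start_pfn, end_pfn, last_val) - no nr_pages. */
typedef unsigned long (*remap_cb)(unsigned long start_pfn,
                                  unsigned long end_pfn,
                                  unsigned long last_val);

static unsigned long foreach_remap_area(remap_cb func)
{
        /* Stand-in for the E820 walk: two fake non-RAM gaps. */
        static const struct region gaps[] = { { 160, 256 }, { 960, 1024 } };
        unsigned long ret_val = 0;
        unsigned long i;

        for (i = 0; i < sizeof(gaps) / sizeof(gaps[0]); i++)
                ret_val = func(gaps[i].start_pfn, gaps[i].end_pfn, ret_val);
        return ret_val;
}

static unsigned long sum_cb(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned long last_val)
{
        /* Accumulate the size of each region handed to the callback. */
        return last_val + (end_pfn - start_pfn);
}

int main(void)
{
        /* (256 - 160) + (1024 - 960) = 160 pages in the fake gaps. */
        printf("%lu\n", foreach_remap_area(sum_cb));
        return 0;
}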