bianbu-linux-6.6/arch/m68k/mm/init.c
Latest commit: fa3354e4ea ("mm: free_area_init: use maximal zone PFNs rather than zone sizes") by Mike Rapoport
Currently, architectures that use free_area_init() to initialize memory
map and node and zone structures need to calculate zone and hole sizes.
We can use free_area_init_nodes() instead and let it detect the zone
boundaries while the architectures will only have to supply the possible
limits for the zones.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Hoan Tran <hoan@os.amperecomputing.com>	[arm64]
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200412194859.12663-5-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-03 20:09:43 -07:00
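
For context, here is a minimal sketch of the pattern the commit describes, using the same kernel symbols that appear in the nommu paging_init() in the file below (memory_end, MAX_NR_ZONES, ZONE_DMA, PAGE_SHIFT, free_area_init()). The function name example_paging_init is hypothetical and the snippet is illustrative only, not part of the file:

/*
 * Illustrative sketch, not from the kernel tree: instead of computing
 * per-zone sizes and holes, the architecture only reports the highest
 * PFN each zone may contain and lets free_area_init() detect the
 * actual zone boundaries.
 */
static void __init example_paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	/* supply only the upper bound of the single DMA zone ... */
	max_zone_pfn[ZONE_DMA] = memory_end >> PAGE_SHIFT;

	/* ... and let the core mm derive zone spans and holes from it */
	free_area_init(max_zone_pfn);
}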


// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/init.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 *
 * Contains common initialization routines, specific init code moved
 * to motorola.c and sun3mmu.c
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#include <asm/tlb.h>

/*
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_MMU

pg_data_t pg_data_map[MAX_NUMNODES];
EXPORT_SYMBOL(pg_data_map);

int m68k_virt_to_node_shift;

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
pg_data_t *pg_data_table[65];
EXPORT_SYMBOL(pg_data_table);
#endif

/*
 * Register this memory chunk's pg_data in the virtual-address-to-node
 * lookup table (pg_data_table) and mark the node online.
 */
void __init m68k_setup_node(int node)
{
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
	struct m68k_mem_info *info = m68k_memory + node;
	int i, end;

	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
	end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >>
		__virt_to_node_shift();
	for (; i <= end; i++) {
		if (pg_data_table[i])
			pr_warn("overlap at %u for chunk %u\n", i, node);
		pg_data_table[i] = pg_data_map + node;
	}
#endif
	node_set_online(node);
}

#else /* CONFIG_MMU */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	high_memory = (void *) end_mem;

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs(USER_DS);

	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);
}

#endif /* CONFIG_MMU */

void free_initmem(void)
{
#ifndef CONFIG_MMU_SUN3
	free_initmem_default(-1);
#endif /* CONFIG_MMU_SUN3 */
}

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#define VECTORS	&vectors[0]
#else
#define VECTORS	_ramvec
#endif

static inline void init_pointer_tables(void)
{
#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
	int i, j;

	/* insert pointer tables allocated so far into the tablelist */
	init_pointer_table(kernel_pg_dir, TABLE_PGD);
	for (i = 0; i < PTRS_PER_PGD; i++) {
		pud_t *pud = (pud_t *)&kernel_pg_dir[i];
		pmd_t *pmd_dir;

		if (!pud_present(*pud))
			continue;

		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
		init_pointer_table(pmd_dir, TABLE_PMD);

		for (j = 0; j < PTRS_PER_PMD; j++) {
			pmd_t *pmd = &pmd_dir[j];
			pte_t *pte_dir;

			if (!pmd_present(*pmd))
				continue;

			pte_dir = (pte_t *)__pmd_page(*pmd);
			init_pointer_table(pte_dir, TABLE_PTE);
		}
	}
#endif
}

void __init mem_init(void)
{
	/* this will put all memory onto the freelists */
	memblock_free_all();
	init_pointer_tables();
	mem_init_print_info(NULL);
}