diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 526a6a6473128a81ff5b0bdfd1e8315889fa2ca4..daa44b2ef35ad0bfc6973f4870973ba0ed1e7532 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -32,4 +32,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
 				      int depth, void *data);
 extern int fadump_reserve_mem(void);
 #endif
+
+#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
+void fadump_cma_init(void);
+#else
+static inline void fadump_cma_init(void) { }
+#endif
+
 #endif /* _ASM_POWERPC_FADUMP_H */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 3ff2da7b120b5090fe041efdebaf359bf2e32348..12bdc5838970e35d7e02dda0f0311c79811e64f9 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -80,7 +80,7 @@ static struct cma *fadump_cma;
  * But for some reason even if it fails we still have the memory reservation
  * with us and we can still continue doing fadump.
  */
-static int __init fadump_cma_init(void)
+int __init fadump_cma_init(void)
 {
 	unsigned long long base, size;
 	int rc;
@@ -129,8 +129,6 @@ static int __init fadump_cma_init(void)
 		fw_dump.reserve_dump_area_size);
 	return 1;
 }
-#else
-static int __init fadump_cma_init(void) { return 1; }
 #endif /* CONFIG_CMA */
 
 /* Scan the Firmware Assisted dump configuration details. */
@@ -647,8 +645,6 @@ int __init fadump_reserve_mem(void)
 
 		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
 			(size >> 20), base, (memblock_phys_mem_size() >> 20));
-
-		ret = fadump_cma_init();
 	}
 
 	return ret;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 03eaad5949f1415278a3cf10201d1eb872552e78..d43db8150767bedd073e2847c07cd33f0da5b51d 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -988,9 +988,11 @@ void __init setup_arch(char **cmdline_p)
 	initmem_init();
 
 	/*
-	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
-	 * be called after initmem_init(), so that pageblock_order is initialised.
+	 * Reserve large chunks of memory for use by CMA for fadump, KVM and
+	 * hugetlb. These must be called after initmem_init(), so that
+	 * pageblock_order is initialised.
 	 */
+	fadump_cma_init();
 	kvm_cma_reserve();
 	gigantic_hugetlb_cma_reserve();
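
Note on the fadump.h change (an editorial illustration, not part of the patch): the new declaration uses the usual config-stub idiom, so setup_arch() can call fadump_cma_init() unconditionally and the call compiles away to an empty inline when CONFIG_FA_DUMP or CONFIG_CMA is not set. A minimal standalone sketch of that idiom is below; HAVE_FEATURE and feature_init() are made-up names for illustration only, not kernel symbols.

#include <stdio.h>

/* Comment this out to mimic the feature being configured out. */
#define HAVE_FEATURE 1

#ifdef HAVE_FEATURE
/* Real implementation, built only when the feature is enabled. */
static void feature_init(void)
{
	printf("feature_init: setting up the CMA-backed area\n");
}
#else
/* Empty stub so call sites need no #ifdef of their own. */
static inline void feature_init(void) { }
#endif

int main(void)
{
	/* The caller looks the same whether or not the feature is built in. */
	feature_init();
	return 0;
}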