memblock: rename memblock_free to memblock_phys_free
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2071840

Tested: This is one of a series of patch sets to enable Arm SystemReady IR
support in the kernel for NXP i.MX8 platforms. At this stage, this has been
tested by ensuring we can survive the CI/CD loop -- i.e., that we have not
broken anything else, and a simple boot test. When sufficient drivers have
been brought in for i.MX8M, we will be able to run further tests.

Conflicts: arch/s390/kernel/setup.c, arch/s390/kernel/smp.c
	These have been modified in ways that no longer strictly match the
	upstream code, throwing off the auto-merge; this is a simple function
	name replacement, however, so it is easily done manually instead.

commit 3ecc68349bbab6bff1d12cbc7951ca6019b2faf6
Author: Mike Rapoport <rppt@kernel.org>
Date:   Fri Nov 5 13:43:19 2021 -0700

    memblock: rename memblock_free to memblock_phys_free

    Since memblock_free() operates on a physical range, make its name reflect
    it and rename it to memblock_phys_free(), so it will be a logical
    counterpart to memblock_phys_alloc().

    The callers are updated with the below semantic patch:

        @@
        expression addr;
        expression size;
        @@
        - memblock_free(addr, size);
        + memblock_phys_free(addr, size);

    Link: https://lkml.kernel.org/r/20210930185031.18648-6-rppt@kernel.org
    Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
    Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
    Cc: Juergen Gross <jgross@suse.com>
    Cc: Shahab Vahedi <Shahab.Vahedi@synopsys.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

(cherry picked from commit 3ecc68349bbab6bff1d12cbc7951ca6019b2faf6)
Signed-off-by: Al Stone <ahs3@redhat.com>
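For context only (not part of this backport): a minimal sketch of how a boot-time caller pairs the renamed helper with memblock_phys_alloc(), which is the point of the rename -- both calls take a physical base address and size. The helper name early_scratch_demo() and the SZ_4K size are illustrative assumptions, not code from this series.

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    /* Hypothetical example: allocate and release a physical range via memblock. */
    static int __init early_scratch_demo(void)
    {
    	phys_addr_t pa;

    	/* Grab one 4 KiB physical page from memblock, 4 KiB aligned. */
    	pa = memblock_phys_alloc(SZ_4K, SZ_4K);
    	if (!pa)
    		return -ENOMEM;

    	/* ... use the physical range during early boot ... */

    	/* Return it to memblock; before this patch the call was memblock_free(). */
    	memblock_phys_free(pa, SZ_4K);
    	return 0;
    }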
@@ -233,7 +233,8 @@ albacore_init_arch(void)
 		unsigned long size;
 
 		size = initrd_end - initrd_start;
-		memblock_free(__pa(initrd_start), PAGE_ALIGN(size));
+		memblock_phys_free(__pa(initrd_start),
+				   PAGE_ALIGN(size));
 		if (!move_initrd(pci_mem))
 			printk("irongate_init_arch: initrd too big "
 			       "(%ldK)\ndisabling initrd\n",
@@ -173,7 +173,7 @@ static void __init highmem_init(void)
 #ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
 
-	memblock_free(high_mem_start, high_mem_sz);
+	memblock_phys_free(high_mem_start, high_mem_sz);
 	for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
 		free_highmem_page(pfn_to_page(tmp));
 #endif
@@ -339,7 +339,7 @@ err_fabric:
 err_sysctrl:
 	iounmap(relocation);
 err_reloc:
-	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
+	memblock_phys_free(hip04_boot_method[0], hip04_boot_method[1]);
 err:
 	return ret;
 }
@@ -158,7 +158,7 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 		panic("Failed to steal %pa bytes at %pS\n",
 		      &size, (void *)_RET_IP_);
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 
 	return phys;
@@ -784,8 +784,8 @@ void __init paging_init(void)
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 	init_mm.pgd = swapper_pg_dir;
 
-	memblock_free(__pa_symbol(init_pg_dir),
-		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+	memblock_phys_free(__pa_symbol(init_pg_dir),
+			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
 
 	memblock_allow_resize();
 }
@@ -529,7 +529,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_phys_free(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -69,10 +69,10 @@ static void __init ip30_mem_init(void)
 		total_mem += size;
 
 		if (addr >= IP30_REAL_MEMORY_START)
-			memblock_free(addr, size);
+			memblock_phys_free(addr, size);
 		else if ((addr + size) > IP30_REAL_MEMORY_START)
-			memblock_free(IP30_REAL_MEMORY_START,
-				      size - IP30_MAX_PROM_MEMORY);
+			memblock_phys_free(IP30_REAL_MEMORY_START,
+					   size - IP30_MAX_PROM_MEMORY);
 	}
 	pr_info("Detected %luMB of physical memory.\n", MEM_SHIFT(total_mem));
 }
@@ -1095,8 +1095,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 	cpufeatures_setup_finished();
 
-	memblock_free(__pa(dt_cpu_features),
-		      sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);
+	memblock_phys_free(__pa(dt_cpu_features),
+			   sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);
 
 	return 0;
 }
@@ -322,8 +322,8 @@ void __init free_unused_pacas(void)
 
 	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
 	if (new_ptrs_size < paca_ptrs_size)
-		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
-			      paca_ptrs_size - new_ptrs_size);
+		memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
+				   paca_ptrs_size - new_ptrs_size);
 
 	paca_nr_cpu_ids = nr_cpu_ids;
 	paca_ptrs_size = new_ptrs_size;
@@ -331,8 +331,8 @@ void __init free_unused_pacas(void)
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (early_radix_enabled()) {
 		/* Ugly fixup, see new_slb_shadow() */
-		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
-			      sizeof(struct slb_shadow));
+		memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
+				   sizeof(struct slb_shadow));
 		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
 	}
 #endif
@@ -840,7 +840,7 @@ static void __init smp_setup_pacas(void)
 		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
 	}
 
-	memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
+	memblock_phys_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
 	cpu_to_phys_id = NULL;
 }
 #endif
@@ -813,7 +813,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
 
 static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_phys_free(__pa(ptr), size);
 }
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
@@ -2767,7 +2767,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	if (!phb->hose) {
 		pr_err(" Can't allocate PCI controller for %pOF\n",
 		       np);
-		memblock_free(__pa(phb), sizeof(struct pnv_phb));
+		memblock_phys_free(__pa(phb), sizeof(struct pnv_phb));
 		return;
 	}
 
@@ -57,7 +57,8 @@ void __init svm_swiotlb_init(void)
 		return;
 
 
-	memblock_free(__pa(vstart), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+	memblock_phys_free(__pa(vstart),
+			   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	panic("SVM: Cannot allocate SWIOTLB buffer");
 }
 
@@ -230,13 +230,14 @@ static void __init init_resources(void)
 
 	/* Clean-up any unused pre-allocated resources */
 	if (res_idx >= 0)
-		memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
+		memblock_phys_free(__pa(mem_res),
+				   (res_idx + 1) * sizeof(*mem_res));
 	return;
 
 error:
 	/* Better an empty resource tree than an inconsistent one */
 	release_child_resources(&iomem_resource);
-	memblock_free(__pa(mem_res), mem_res_sz);
+	memblock_phys_free(__pa(mem_res), mem_res_sz);
 }
 
 
@@ -639,7 +639,7 @@ static void __init reserve_crashkernel(void)
 	}
 
 	if (register_memory_notifier(&kdump_mem_nb)) {
-		memblock_free(crash_base, crash_size);
+		memblock_phys_free(crash_base, crash_size);
 		return;
 	}
 
@@ -739,7 +739,7 @@ static void __init check_initrd(void)
 	if (INITRD_START && INITRD_SIZE &&
 	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
 		pr_err("The initial RAM disk does not fit into the memory\n");
-		memblock_free(INITRD_START, INITRD_SIZE);
+		memblock_phys_free(INITRD_START, INITRD_SIZE);
 		initrd_start = initrd_end = 0;
 	}
 #endif
@@ -709,7 +709,7 @@ void __init smp_save_dump_cpus(void)
 		/* Get the CPU registers */
 		smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
-	memblock_free(page, PAGE_SIZE);
+	memblock_phys_free(page, PAGE_SIZE);
 	diag_dma_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
@@ -64,7 +64,7 @@ void __init setup_uv(void)
 	}
 
 	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
-		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
+		memblock_phys_free(uv_stor_base, uv_info.uv_base_stor_len);
 		goto fail;
 	}
 
@@ -400,5 +400,5 @@ void __init kasan_copy_shadow_mapping(void)
 
 void __init kasan_free_early_identity(void)
 {
-	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+	memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
 }
@@ -560,7 +560,7 @@ static void __init ap325rxa_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 
 	ceu_dma_membase = phys;
@@ -1502,7 +1502,7 @@ static void __init ecovec_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU0 memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 	ceu0_dma_membase = phys;
 
@@ -1510,7 +1510,7 @@ static void __init ecovec_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU1 memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 	ceu1_dma_membase = phys;
 }
@@ -633,7 +633,7 @@ static void __init kfr2r09_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 
 	ceu_dma_membase = phys;
@@ -633,7 +633,7 @@ static void __init migor_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 
 	ceu_dma_membase = phys;
@@ -966,7 +966,7 @@ static void __init ms7724se_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU0 memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 	ceu0_dma_membase = phys;
 
@@ -974,7 +974,7 @@ static void __init ms7724se_mv_mem_reserve(void)
 	if (!phys)
 		panic("Failed to allocate CEU1 memory\n");
 
-	memblock_free(phys, size);
+	memblock_phys_free(phys, size);
 	memblock_remove(phys, size);
 	ceu1_dma_membase = phys;
 }
@@ -1567,7 +1567,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
 
 static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_phys_free(__pa(ptr), size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
@@ -47,7 +47,7 @@ void __init mem_init(void)
 	 */
 	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
 	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
-	memblock_free(__pa(brk_end), uml_reserved - brk_end);
+	memblock_phys_free(__pa(brk_end), uml_reserved - brk_end);
 	uml_reserved = brk_end;
 
 	/* this will put all low memory onto the freelists */
@@ -325,7 +325,7 @@ static void __init reserve_initrd(void)
 
 	relocate_initrd();
 
-	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
+	memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 
 #else
@@ -524,7 +524,7 @@ static void __init reserve_crashkernel(void)
 	}
 
 	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
-		memblock_free(crash_base, crash_size);
+		memblock_phys_free(crash_base, crash_size);
 		return;
 	}
 
@@ -618,7 +618,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 	 */
 	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
 					 map_end);
-	memblock_free(addr, PMD_SIZE);
+	memblock_phys_free(addr, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
@@ -1025,7 +1025,7 @@ static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
 	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
 		make_lowmem_page_readwrite(vaddr);
 
-	memblock_free(paddr, size);
+	memblock_phys_free(paddr, size);
 }
 
 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
@@ -1151,7 +1151,7 @@ static void __init xen_pagetable_p2m_free(void)
 		xen_cleanhighmap(addr, addr + size);
 		size = PAGE_ALIGN(xen_start_info->nr_pages *
 				  sizeof(unsigned long));
-		memblock_free(__pa(addr), size);
+		memblock_phys_free(__pa(addr), size);
 	} else {
 		xen_cleanmfnmap(addr);
 	}
@@ -1952,7 +1952,7 @@ void __init xen_relocate_p2m(void)
 		pfn_end = p2m_pfn_end;
 	}
 
-	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
+	memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
 	while (pfn < pfn_end) {
 		if (pfn == p2m_pfn) {
 			pfn = p2m_pfn_end;
@@ -153,7 +153,7 @@ static void __init xen_del_extra_mem(unsigned long start_pfn,
 			break;
 		}
 	}
-	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
+	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
 }
 
 /*
@@ -719,7 +719,7 @@ static void __init xen_reserve_xen_mfnlist(void)
 		return;
 
 	xen_relocate_p2m();
-	memblock_free(start, size);
+	memblock_phys_free(start, size);
 }
 
 /**
@@ -885,7 +885,7 @@ char * __init xen_memory_setup(void)
 		xen_phys_memcpy(new_area, start, size);
 		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
 			start, start + size, new_area, new_area + size);
-		memblock_free(start, size);
+		memblock_phys_free(start, size);
 		boot_params.hdr.ramdisk_image = new_area;
 		boot_params.ext_ramdisk_image = new_area >> 32;
 	}
@@ -166,7 +166,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_phys_free(__pa(ptr), size);
 }
 
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
@@ -35,7 +35,7 @@ void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
 		if (slab_is_available())
 			memblock_free_late(phys, size);
 		else
-			memblock_free(phys, size);
+			memblock_phys_free(phys, size);
 	} else if (flags & EFI_MEMMAP_SLAB) {
 		struct page *p = pfn_to_page(PHYS_PFN(phys));
 		unsigned int order = get_order(size);
@@ -171,8 +171,7 @@ int ima_free_kexec_buffer(void)
 	if (ret)
 		return ret;
 
-	return memblock_free(addr, size);
-
+	return memblock_phys_free(addr, size);
 }
 
 /**
@@ -46,7 +46,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 	if (nomap) {
 		err = memblock_mark_nomap(base, size);
 		if (err)
-			memblock_free(base, size);
+			memblock_phys_free(base, size);
 		kmemleak_ignore_phys(base);
 	}
 
@@ -284,7 +284,8 @@ void __init fdt_init_reserved_mem(void)
 			if (nomap)
 				memblock_clear_nomap(rmem->base, rmem->size);
 			else
-				memblock_free(rmem->base, rmem->size);
+				memblock_phys_free(rmem->base,
+						   rmem->size);
 		}
 	}
 }
@@ -139,7 +139,7 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
 	}
 	sclp_fill_core_info(info, sccb);
 out:
-	memblock_free((unsigned long)sccb, length);
+	memblock_phys_free((unsigned long)sccb, length);
 	return rc;
 }
 
@@ -192,7 +192,7 @@ static void __init xdbc_free_ring(struct xdbc_ring *ring)
 	if (!seg)
 		return;
 
-	memblock_free(seg->dma, PAGE_SIZE);
+	memblock_phys_free(seg->dma, PAGE_SIZE);
 	ring->segment = NULL;
 }
 
@@ -672,10 +672,10 @@ int __init early_xdbc_setup_hardware(void)
 		xdbc_free_ring(&xdbc.in_ring);
 
 		if (xdbc.table_dma)
-			memblock_free(xdbc.table_dma, PAGE_SIZE);
+			memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
 
 		if (xdbc.out_dma)
-			memblock_free(xdbc.out_dma, PAGE_SIZE);
+			memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
 
 		xdbc.table_base = NULL;
 		xdbc.out_buf = NULL;
|
|||
xdbc_free_ring(&xdbc.evt_ring);
|
||||
xdbc_free_ring(&xdbc.out_ring);
|
||||
xdbc_free_ring(&xdbc.in_ring);
|
||||
memblock_free(xdbc.table_dma, PAGE_SIZE);
|
||||
memblock_free(xdbc.out_dma, PAGE_SIZE);
|
||||
memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
|
||||
memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
|
||||
writel(0, &xdbc.xdbc_reg->control);
|
||||
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
|
||||
|
||||
|
|
|
@@ -241,7 +241,7 @@ retry:
 	 */
 	rc = xen_swiotlb_fixup(start, nslabs);
 	if (rc) {
-		memblock_free(__pa(start), PAGE_ALIGN(bytes));
+		memblock_phys_free(__pa(start), PAGE_ALIGN(bytes));
 		if (nslabs > 1024 && repeat--) {
 			/* Min is 2MB */
 			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
@@ -103,7 +103,7 @@ void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
@@ -606,7 +606,7 @@ void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
 	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
 
-	memblock_free(__pa(aligned_start), aligned_end - aligned_start);
+	memblock_phys_free(__pa(aligned_start), aligned_end - aligned_start);
 #endif
 
 	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
@@ -284,7 +284,7 @@ swiotlb_init(int verbose)
 		return;
 
 fail_free_mem:
-	memblock_free(__pa(tlb), bytes);
+	memblock_phys_free(__pa(tlb), bytes);
 fail:
 	pr_warn("Cannot allocate buffer");
 }
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(free_cpumask_var);
  */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	memblock_free(__pa(mask), cpumask_size());
+	memblock_phys_free(__pa(mask), cpumask_size());
 }
 #endif
 
mm/cma.c
@@ -391,7 +391,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	return 0;
 
 free_mem:
-	memblock_free(base, size);
+	memblock_phys_free(base, size);
 err:
 	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
@@ -806,18 +806,18 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 void __init_memblock memblock_free_ptr(void *ptr, size_t size)
 {
 	if (ptr)
-		memblock_free(__pa(ptr), size);
+		memblock_phys_free(__pa(ptr), size);
 }
 
 /**
- * memblock_free - free boot memory block
+ * memblock_phys_free - free boot memory block
  * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_alloc_xx() API.
  * The freeing memory will not be released to the buddy allocator.
  */
-int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
 
@@ -1940,7 +1940,7 @@ static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * memmap array.
 	 */
 	if (pg < pgend)
-		memblock_free(pg, pgend - pg);
+		memblock_phys_free(pg, pgend - pg);
 }
 
 /*
@@ -2137,7 +2137,7 @@ static int __ref try_remove_memory(u64 start, u64 size)
 	arch_remove_memory(start, size, altmap);
 
 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
-		memblock_free(start, size);
+		memblock_phys_free(start, size);
 		memblock_remove(start, size);
 	}
 
@@ -2472,7 +2472,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  */
 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
 {
-	memblock_free(__pa(ai), ai->__ai_size);
+	memblock_phys_free(__pa(ai), ai->__ai_size);
 }
 
 /**
@@ -3134,7 +3134,7 @@ out_free_areas:
 out_free:
 	pcpu_free_alloc_info(ai);
 	if (areas)
-		memblock_free(__pa(areas), areas_size);
+		memblock_phys_free(__pa(areas), areas_size);
 	return rc;
 }
 #endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -3256,7 +3256,7 @@ enomem:
 		free_fn(page_address(pages[j]), PAGE_SIZE);
 	rc = -ENOMEM;
 out_free_ar:
-	memblock_free(__pa(pages), pages_size);
+	memblock_phys_free(__pa(pages), pages_size);
 	pcpu_free_alloc_info(ai);
 	return rc;
 }
@@ -3286,7 +3286,7 @@ static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_phys_free(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -451,7 +451,7 @@ static void *sparsemap_buf_end __meminitdata;
 static inline void __meminit sparse_buffer_free(unsigned long size)
 {
 	WARN_ON(!sparsemap_buf || size == 0);
-	memblock_free(__pa(sparsemap_buf), size);
+	memblock_phys_free(__pa(sparsemap_buf), size);
 }
 
 static void __init sparse_buffer_init(unsigned long size, int nid)