// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned int cma_area_count;

static int __init __cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
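
/*
 * Return the physical base address of the area; only meaningful for
 * areas made up of a single range.
 */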
phys_addr_t cma_get_base(const struct cma *cma)
{
	WARN_ON_ONCE(cma->nranges != 1);
	return PFN_PHYS(cma->ranges[0].base_pfn);
}
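
/* Return the total size of the CMA area in bytes. */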
unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
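
/*
 * Convert an allocation alignment (given as a page order) into the
 * corresponding alignment mask for bitmap indices, which are kept in
 * order_per_bit granularity.
 */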
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       const struct cma_memrange *cmr,
					       unsigned int align_order)
{
	return (cmr->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
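
/* Convert a page count into the number of bitmap entries needed to cover it. */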
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
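
/*
 * Mark the given page range as free again in the range's bitmap and
 * update the count of available pages, under the area's spinlock.
 */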
static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
			     unsigned long pfn, unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
	cma->available_count += count;
	spin_unlock_irqrestore(&cma->lock, flags);
}

/*
 * Check if a CMA area contains no ranges that intersect with
 * multiple zones. Store the result in the flags in case
 * this gets called more than once.
 */
bool cma_validate_zones(struct cma *cma)
{
	int r;
	unsigned long base_pfn;
	struct cma_memrange *cmr;
	bool valid_bit_set;

	/*
	 * If already validated, return result of previous check.
	 * Either the valid or invalid bit will be set if this
	 * check has already been done. If neither is set, the
	 * check has not been performed yet.
	 */
	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
		return valid_bit_set;

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		base_pfn = cmr->base_pfn;

		/*
		 * alloc_contig_range() requires the pfn range specified
		 * to be in the same zone. Simplify by forcing the entire
		 * CMA resv range to be in the same zone.
		 */
		WARN_ON_ONCE(!pfn_valid(base_pfn));
		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
			set_bit(CMA_ZONES_INVALID, &cma->flags);
			return false;
		}
	}

	set_bit(CMA_ZONES_VALID, &cma->flags);

	return true;
}
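
/*
 * Activate a CMA area at boot: allocate the per-range allocation bitmaps
 * and verify that no range straddles a zone boundary before the area is
 * made available for allocations.
 */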
static void __init cma_activate_area(struct cma *cma)
{
	unsigned long pfn, end_pfn;
	int allocrange, r;
	struct cma_memrange *cmr;
	unsigned long bitmap_count, count;

	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
		cmr = &cma->ranges[allocrange];
		cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
					    GFP_KERNEL);
		if (!cmr->bitmap)
			goto cleanup;
	}

	if (!cma_validate_zones(cma))
		goto cleanup;

	for (r = 0; r < cma->nranges;