// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
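
/*
 * Worked example for cma_bitmap_aligned_mask() (illustrative numbers, not
 * from any particular platform): with cma->order_per_bit = 0 and
 * align_order = 2, the mask is (1UL << 2) - 1 = 0x3, which
 * bitmap_find_next_zero_area_off() uses to round candidate positions up
 * to 4-page boundaries. With order_per_bit = 2 and align_order = 2, one
 * bit already covers a whole aligned 4-page chunk, so the mask is 0.
 */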

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
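
/*
 * Worked example for cma_bitmap_aligned_offset() (illustrative numbers):
 * for an area with base_pfn = 2052, order_per_bit = 0 and align_order = 4,
 * the base sits 4 pages past a 16-page boundary (2052 & 15 = 4), so the
 * offset is 4 bits. Together with the mask above, this keeps allocations
 * aligned in physical address space rather than relative to the area base.
 */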

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
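
/*
 * Worked example for cma_bitmap_pages_to_bits() (illustrative numbers):
 * with order_per_bit = 2 each bit covers 4 pages, so a request for 5
 * pages is rounded up to ALIGN(5, 4) = 8 pages, i.e. 2 bits.
 */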

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved
 * memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
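
/*
 * Minimal usage sketch (illustrative only; rmem is a struct reserved_mem
 * handed in by the devicetree reserved-memory path): glue code such as
 * the handler in kernel/dma/contiguous.c calls this for a region that
 * memblock has already reserved, along these lines:
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *				    rmem->name, &cma);
 *	if (err)
 *		return err;
 *
 * The area only becomes usable for cma_alloc() after
 * cma_init_reserved_areas() has activated it at core_initcall time.
 */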

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock) has been
 * activated and all other subsystems have already allocated/reserved memory.
 * It allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa, alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
		       &base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
		       &base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
		       &size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the
		 * node and guarantee that compaction moves pages out of the
		 * cma area and not into it.
		 * Avoid using the first 4GB so as not to interfere with
		 * constrained zones like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
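
/*
 * Minimal usage sketch (illustrative only; the size and names are made
 * up): early arch or platform code, once memblock is up, could reserve a
 * 16 MiB area anywhere in memory with:
 *
 *	static struct cma *my_cma;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_16M, 0, 0, 0, false,
 *					 "my_cma", &my_cma, NUMA_NO_NODE);
 *
 * Zero @base, @limit and @alignment mean "anywhere", "no limit" and
 * CMA_MIN_ALIGNMENT_BYTES respectively, per the sanitising above.
 */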

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
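
/*
 * Sample cma_debug_show_areas() output (hypothetical bitmap state, with
 * order_per_bit = 0 and nothing interleaving the continuation lines):
 *
 *	cma: number of available pages: 24@4+512@128=> 536 free of 1024 total pages
 *
 * Each free run is printed as <pages>@<bitmap offset>, runs are joined
 * with '+', and the totals follow.
 */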

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %u)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
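
/*
 * Minimal allocation sketch (illustrative only; "my_cma" would come from
 * an earlier cma_declare_contiguous_nid() or cma_init_reserved_mem()
 * call):
 *
 *	unsigned long nr = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, nr, get_order(SZ_1M), false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, page, nr);
 *
 * Note that cma_alloc() can sleep (alloc_contig_range() is called with
 * GFP_KERNEL and cma_mutex is taken), so it must not be used from atomic
 * context.
 */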

/*
 * Check whether @pages belongs to the CMA area @cma.
 */
bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

/*
 * Call @it for each registered CMA area; stop and return its result on
 * the first non-zero return value.
 */
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
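
/*
 * Callback sketch for cma_for_each_area() (illustrative only; the
 * callback is made up):
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(print_one_cma, NULL);
 */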