#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4
/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};
extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif
#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
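
/*
 * Example (illustrative, not part of this header): instrument a helper so
 * its activity shows up only when booting with "memblock=debug":
 *
 *	memblock_dbg("%s: [%pa-%pa]\n", __func__, &base, &end);
 */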
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
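
/*
 * Example (illustrative sketch): a typical early-boot sequence registers
 * the RAM an architecture discovered and fences off ranges that firmware
 * or the kernel image already uses. The base/size and initrd_* values are
 * hypothetical and would come from the platform's firmware tables:
 *
 *	memblock_add(base, size);			// usable RAM
 *	memblock_reserve(initrd_start,			// keep initrd intact
 *			 initrd_end - initrd_start);
 */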
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
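
/*
 * Example (illustrative sketch): walk every usable range that is not
 * reserved, printing its bounds. With these arguments this is exactly
 * what the for_each_free_mem_range() convenience wrapper below expands to:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */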
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))
/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
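
/*
 * Example (illustrative sketch): total up reserved memory for a boot-time
 * report (memblock_reserved_size() below gives the same answer directly):
 *
 *	phys_addr_t start, end, total = 0;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		total += end - start;
 */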
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}
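
/*
 * Example (illustrative sketch): skip regions the kernel must not touch
 * while walking the memory type with for_each_memblock() (defined below):
 *
 *	struct memblock_region *r;
 *
 *	for_each_memblock(memory, r) {
 *		if (memblock_is_nomap(r))
 *			continue;
 *		// ... use r->base / r->size ...
 *	}
 */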
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
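
/*
 * Example (illustrative sketch): count the pages present on NUMA node 0
 * during early init (note the loop variable is a plain int here):
 *
 *	unsigned long start, end, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start, &end, NULL)
 *		pages += end - start;
 */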
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone is initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
 * for_each_free_mem_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
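
/*
 * Example (illustrative sketch): deferred struct page init can walk a
 * zone's free pfn ranges in two phases, resuming with the _from variant
 * once the first loop breaks out. should_defer() and init_range() are
 * hypothetical helpers standing in for the real deferred-init logic:
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
 *		if (should_defer(spfn))
 *			break;
 *		init_range(spfn, epfn);
 *	}
 *	// ... later, pick up where we left off:
 *	for_each_free_mem_pfn_range_in_zone_from(i, zone, &spfn, &epfn)
 *		init_range(spfn, epfn);
 */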
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
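
/*
 * Example (illustrative sketch): find the largest contiguous free range,
 * e.g. to size an early boot-time buffer:
 *
 *	phys_addr_t start, end, best = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		best = max(best, end - start);
 */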
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)
static inline void memblock_set_region_flags(struct memblock_region *r,
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
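
/*
 * Example (illustrative sketch): grab a page-aligned physical range for
 * a device that needs memory below 4 GiB; the 1 MiB size is hypothetical.
 * The range-limited allocator returns 0 on failure:
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_1M, PAGE_SIZE,
 *						   0, SZ_4G);
 *	if (!pa)
 *		panic("cannot allocate low memory\n");
 */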
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
				     phys_addr_t min_addr, phys_addr_t max_addr,
				     int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
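
/*
 * Example (illustrative sketch): early boot code typically allocates
 * zeroed, directly-mapped memory with memblock_alloc() before the page
 * allocator is up; the table and its size are hypothetical:
 *
 *	struct my_entry *tbl;
 *
 *	tbl = memblock_alloc(1024 * sizeof(*tbl), SMP_CACHE_BYTES);
 */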
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
						   phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
							phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      ARCH_LOW_ADDRESS_LIMIT,
					      NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
							phys_addr_t align,
							phys_addr_t min_addr)
{
	return memblock_alloc_try_nid_nopanic(size, align, min_addr,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
							 int nid)
{
	return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
					      MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this is true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
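
/*
 * Example (illustrative sketch): some architectures briefly switch to
 * bottom-up allocation so early allocations land near the kernel image,
 * then restore the default. setup_something_early() is a hypothetical
 * early-boot hook:
 *
 *	memblock_set_bottom_up(true);
 *	setup_something_early();
 *	memblock_set_bottom_up(false);
 */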
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);
static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */
/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
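
/*
 * Example (illustrative): memory pfns round inward (PFN_UP on base,
 * PFN_DOWN on end) so only fully-present pages are counted, while
 * reserved pfns round outward so every touched page is covered. With
 * 4 KiB pages, a reserved region [0x1080, 0x2080) yields base_pfn = 0x1
 * and end_pfn = 0x3, i.e. pages [0x1000, 0x3000) are treated as reserved.
 */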
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
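
/*
 * Example (illustrative sketch): dump every registered memory region,
 * similar in spirit to what memblock_dump_all() prints:
 *
 *	struct memblock_region *r;
 *
 *	for_each_memblock(memory, r)
 *		pr_info("region: %pa size %pa flags %#x\n",
 *			&r->base, &r->size, (unsigned int)r->flags);
 */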
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */