3 * Procedures for interfacing to Open Firmware.
5 * Peter Bergner, IBM Corp. June 2001.
6 * Copyright (C) 2001 Peter Bergner.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 #include <linux/config.h>
15 #include <linux/kernel.h>
16 #include <asm/types.h>
20 #include <asm/abs_addr.h>
21 #include <asm/bitops.h>
/* End of the kernel image; defined elsewhere (not referenced in the visible code). */
24 extern unsigned long klimit;
/* Offset between link-time and run-time addresses.  Used together with
 * PTRRELOC so the lmb structure can be touched while relocation is
 * still disabled during early boot. */
25 extern unsigned long reloc_offset(void);
/* Forward declaration: insert (base, size) of the given type into a
 * region table, coalescing with neighbours where possible. */
28 static long lmb_add_region(struct lmb_region *, unsigned long, unsigned long, unsigned long);
/* Merge two physically adjacent entries of a region table into one.
 * Region r1 absorbs r2's size, then every entry above r2 is shifted
 * down one slot to keep the table dense and sorted.
 * NOTE(review): the trailing decrement of rgn->cnt is not visible in
 * this fragment — confirm it follows the copy loop. */
37 /* Assumption: base addr of region 1 < base addr of region 2 */
39 lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
43 rgn->region[r1].size += rgn->region[r2].size;
/* Compact the table: copy each field of entry i+1 over entry i,
 * starting at the now-redundant slot r2. */
44 for (i=r2; i < rgn->cnt-1 ;i++) {
45 rgn->region[i].base = rgn->region[i+1].base;
46 rgn->region[i].physbase = rgn->region[i+1].physbase;
47 rgn->region[i].size = rgn->region[i+1].size;
48 rgn->region[i].type = rgn->region[i+1].type;
/* Initialize the global lmb structure (presumably lmb_init — the
 * signature line is not visible in this fragment).  Runs before
 * relocation is enabled, hence the PTRRELOC dance to reach the
 * static lmb through its real address. */
54 /* This routine called with relocation disabled. */
58 unsigned long offset = reloc_offset();
59 struct lmb *_lmb = PTRRELOC(&lmb);
61 /* Create a dummy zero size LMB which will get coalesced away later.
62 * This simplifies the lmb_add() code below...
/* Seed the memory table with a single zero-size region at base 0. */
64 _lmb->memory.region[0].base = 0;
65 _lmb->memory.region[0].size = 0;
66 _lmb->memory.region[0].type = LMB_MEMORY_AREA;
/* Ditto for the reserved table; its count starts at 1 so the sorted
 * insertion in lmb_add_region() always has a sentinel entry. */
70 _lmb->reserved.region[0].base = 0;
71 _lmb->reserved.region[0].size = 0;
72 _lmb->reserved.region[0].type = LMB_MEMORY_AREA;
73 _lmb->reserved.cnt = 1;
/* Walk the assembled memory table and derive summary values
 * (presumably lmb_analyze — the signature line is not visible):
 * total memory size, total I/O size, and the largest common power-of-2
 * divisor of all region sizes (lcd_size).  Under CONFIG_MSCHUNKS the
 * regions are also assigned a contiguous physbase address space,
 * memory areas first, then I/O areas. */
76 /* This routine called with relocation disabled. */
81 unsigned long mem_size = 0;
82 unsigned long io_size = 0;
/* OR of every region size; its lowest set bit gives the LCD below. */
83 unsigned long size_mask = 0;
84 unsigned long offset = reloc_offset();
85 struct lmb *_lmb = PTRRELOC(&lmb);
86 #ifdef CONFIG_MSCHUNKS
/* Next free address in the packed physical address space. */
87 unsigned long physbase = 0;
/* Pass 1: memory areas only. */
90 for (i=0; i < _lmb->memory.cnt ;i++) {
91 unsigned long lmb_type = _lmb->memory.region[i].type;
92 unsigned long lmb_size;
94 if ( lmb_type != LMB_MEMORY_AREA )
97 lmb_size = _lmb->memory.region[i].size;
99 #ifdef CONFIG_MSCHUNKS
/* Pack memory regions back-to-back in physbase space. */
100 _lmb->memory.region[i].physbase = physbase;
101 physbase += lmb_size;
/* Without MSCHUNKS, physical == absolute address. */
103 _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
105 mem_size += lmb_size;
106 size_mask |= lmb_size;
109 #ifdef CONFIG_MSCHUNKS
/* Pass 2: I/O areas, appended after all memory in physbase space.
 * NOTE(review): io_size is never updated in the visible lines — the
 * accumulation statement may have been lost in this fragment; verify. */
110 for (i=0; i < _lmb->memory.cnt ;i++) {
111 unsigned long lmb_type = _lmb->memory.region[i].type;
112 unsigned long lmb_size;
114 if ( lmb_type != LMB_IO_AREA )
117 lmb_size = _lmb->memory.region[i].size;
119 _lmb->memory.region[i].physbase = physbase;
120 physbase += lmb_size;
122 size_mask |= lmb_size;
124 #endif /* CONFIG_MSCHUNKS */
126 _lmb->memory.size = mem_size;
127 _lmb->memory.iosize = io_size;
/* lcd_size = lowest set bit of the OR of all sizes. */
128 _lmb->memory.lcd_size = (1UL << cnt_trailing_zeros(size_mask));
/* Add a block of system memory [base, base+size) to the memory table.
 * Returns the result of lmb_add_region() (negative on table overflow,
 * judging by the MAX_LMB_REGIONS check there). */
131 /* This routine called with relocation disabled. */
133 lmb_add(unsigned long base, unsigned long size)
135 unsigned long offset = reloc_offset();
136 struct lmb *_lmb = PTRRELOC(&lmb);
137 struct lmb_region *_rgn = &(_lmb->memory);
/* The guard condition for this assignment (presumably base == 0) is
 * not visible in this fragment — confirm against the full file. */
139 /* On pSeries LPAR systems, the first LMB is our RMO region. */
141 _lmb->rmo_size = size;
143 return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
147 #ifdef CONFIG_MSCHUNKS
/* Add an I/O area [base, base+size) to the shared memory table,
 * tagged LMB_IO_AREA so lmb_analyze()/allocators can skip it.
 * Only built under CONFIG_MSCHUNKS. */
148 /* This routine called with relocation disabled. */
150 lmb_add_io(unsigned long base, unsigned long size)
152 unsigned long offset = reloc_offset();
153 struct lmb *_lmb = PTRRELOC(&lmb);
/* Note: I/O areas share the *memory* table, distinguished by type. */
154 struct lmb_region *_rgn = &(_lmb->memory);
156 return lmb_add_region(_rgn, base, size, LMB_IO_AREA);
159 #endif /* CONFIG_MSCHUNKS */
/* Mark [base, base+size) as reserved so lmb_alloc_base() will not
 * hand it out.  Reservations live in their own table (_lmb->reserved). */
162 lmb_reserve(unsigned long base, unsigned long size)
164 unsigned long offset = reloc_offset();
165 struct lmb *_lmb = PTRRELOC(&lmb);
166 struct lmb_region *_rgn = &(_lmb->reserved);
168 return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
/* Core insertion routine: add [base, base+size) of the given type to a
 * region table, first trying to coalesce with an existing same-type
 * entry, otherwise inserting into the table in sorted (by base) order. */
171 /* This routine called with relocation disabled. */
173 lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
176 unsigned long i, coalesced = 0;
179 /* First try and coalesce this LMB with another. */
180 for (i=0; i < rgn->cnt ;i++) {
181 unsigned long rgnbase = rgn->region[i].base;
182 unsigned long rgnsize = rgn->region[i].size;
183 unsigned long rgntype = rgn->region[i].type;
/* Only regions of the same type may be merged. */
185 if ( rgntype != type )
/* adjacent > 0: new block sits immediately below region i;
 * adjacent < 0: new block sits immediately above region i. */
188 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
189 if ( adjacent > 0 ) {
/* Grow region i downward to absorb the new block. */
190 rgn->region[i].base -= size;
191 rgn->region[i].physbase -= size;
192 rgn->region[i].size += size;
196 else if ( adjacent < 0 ) {
/* Grow region i upward to absorb the new block. */
197 rgn->region[i].size += size;
/* If growing region i made it touch region i+1, merge the pair. */
203 if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
204 lmb_coalesce_regions(rgn, i, i+1);
/* Table full: the error return value is not visible in this fragment. */
210 } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
214 /* Couldn't coalesce the LMB, so add it to the sorted table. */
/* Walk down from the top, shifting larger-based entries up one slot.
 * NOTE(review): i appears to be unsigned (line "176" above), so
 * "i >= 0" is always true; termination relies on the dummy base-0
 * region installed at init making the else branch fire — confirm
 * the break in the (not visible) else arm. */
215 for (i=rgn->cnt-1; i >= 0 ;i--) {
216 if (base < rgn->region[i].base) {
217 rgn->region[i+1].base = rgn->region[i].base;
218 rgn->region[i+1].physbase = rgn->region[i].physbase;
219 rgn->region[i+1].size = rgn->region[i].size;
220 rgn->region[i+1].type = rgn->region[i].type;
/* Found the slot: place the new entry just above region i. */
222 rgn->region[i+1].base = base;
223 rgn->region[i+1].physbase = lmb_abs_to_phys(base);
224 rgn->region[i+1].size = size;
225 rgn->region[i+1].type = type;
/* Return the index of the first region in the table that overlaps
 * [base, base+size), or -1 if none does. */
235 lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
239 for (i=0; i < rgn->cnt ;i++) {
240 unsigned long rgnbase = rgn->region[i].base;
241 unsigned long rgnsize = rgn->region[i].size;
/* A break presumably follows inside this branch (not visible),
 * leaving i at the first overlapping entry. */
242 if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
247 return (i < rgn->cnt) ? i : -1;
/* Allocate size bytes aligned to align, anywhere in memory.
 * Thin wrapper around lmb_alloc_base() with no upper address limit. */
251 lmb_alloc(unsigned long size, unsigned long align)
253 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
/* Allocate size bytes aligned to align, entirely below max_addr (or
 * anywhere when max_addr == LMB_ALLOC_ANYWHERE).  Scans the memory
 * table top-down, carving from the high end of each candidate region
 * and stepping below any reserved range that overlaps.  The chosen
 * base is recorded in the reserved table before returning.
 * NOTE(review): the returned value on success/failure is not visible
 * in this fragment; base == 0 appears to signal failure. */
257 lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
260 unsigned long base = 0;
261 unsigned long offset = reloc_offset();
262 struct lmb *_lmb = PTRRELOC(&lmb);
263 struct lmb_region *_mem = &(_lmb->memory);
264 struct lmb_region *_rsv = &(_lmb->reserved);
/* Highest-address regions first.  NOTE(review): if i is unsigned the
 * "i >= 0" test never fails — its declaration is not visible; confirm. */
266 for (i=_mem->cnt-1; i >= 0 ;i--) {
267 unsigned long lmbbase = _mem->region[i].base;
268 unsigned long lmbsize = _mem->region[i].size;
269 unsigned long lmbtype = _mem->region[i].type;
/* Skip I/O areas; only real memory is allocatable. */
271 if ( lmbtype != LMB_MEMORY_AREA )
/* Candidate: highest aligned address that fits in this region
 * (clamped to max_addr when a limit was given). */
274 if ( max_addr == LMB_ALLOC_ANYWHERE )
275 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
276 else if ( lmbbase < max_addr )
277 base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
/* Slide the candidate down past every reserved range it overlaps,
 * staying within the current region. */
281 while ( (lmbbase <= base) &&
282 ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
283 base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
/* Candidate survived inside the region: take it. */
286 if ( (base != 0) && (lmbbase <= base) )
/* Record the allocation so later calls avoid it. */
293 lmb_add_region(_rsv, base, size, LMB_MEMORY_AREA);
/* Total physical memory size.  With MSCHUNKS this is the precomputed
 * sum from lmb_analyze(); otherwise it is derived from the last table
 * entry's physbase + size (valid because regions are packed/sorted). */
299 lmb_phys_mem_size(void)
301 unsigned long offset = reloc_offset();
302 struct lmb *_lmb = PTRRELOC(&lmb);
303 #ifdef CONFIG_MSCHUNKS
304 return _lmb->memory.size;
/* non-MSCHUNKS branch (the #else line is not visible in this fragment) */
306 struct lmb_region *_mem = &(_lmb->memory);
307 unsigned long idx = _mem->cnt-1;
308 unsigned long lastbase = _mem->region[idx].physbase;
309 unsigned long lastsize = _mem->region[idx].size;
311 return (lastbase + lastsize);
312 #endif /* CONFIG_MSCHUNKS */
/* Return the address one past the highest LMB_MEMORY_AREA region:
 * physbase-relative under MSCHUNKS, absolute otherwise.  Scans the
 * sorted table from the top so the first memory-typed entry wins. */
316 lmb_end_of_DRAM(void)
318 unsigned long offset = reloc_offset();
319 struct lmb *_lmb = PTRRELOC(&lmb);
320 struct lmb_region *_mem = &(_lmb->memory);
/* NOTE(review): if idx is unsigned, "idx >= 0" never fails; its
 * declaration is not visible in this fragment — confirm. */
323 for(idx=_mem->cnt-1; idx >= 0 ;idx--) {
/* Skip I/O areas at the top of the table. */
324 if ( _mem->region[idx].type != LMB_MEMORY_AREA )
326 #ifdef CONFIG_MSCHUNKS
327 return (_mem->region[idx].physbase + _mem->region[idx].size);
/* non-MSCHUNKS branch (the #else line is not visible in this fragment) */
329 return (_mem->region[idx].base + _mem->region[idx].size);
330 #endif /* CONFIG_MSCHUNKS */
/* Translate an absolute address to its physical (physbase-relative)
 * address by locating the memory region containing it and applying
 * that region's base-to-physbase offset.  If no region contains the
 * address, it is returned unchanged (pa is initialized to aa). */
338 lmb_abs_to_phys(unsigned long aa)
340 unsigned long i, pa = aa;
341 unsigned long offset = reloc_offset();
342 struct lmb *_lmb = PTRRELOC(&lmb);
343 struct lmb_region *_mem = &(_lmb->memory);
345 for (i=0; i < _mem->cnt ;i++) {
346 unsigned long lmbbase = _mem->region[i].base;
347 unsigned long lmbsize = _mem->region[i].size;
/* Treat aa as a 1-byte range to reuse the overlap helper. */
348 if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
349 pa = _mem->region[i].physbase + (aa - lmbbase);
/* Debug dump of the whole lmb structure via udbg_printf (the function
 * signature is not visible in this fragment; the banner suggests it is
 * named lmb_dump and takes a caller-supplied label string). */
362 udbg_printf("\nlmb_dump: %s\n", str);
363 udbg_printf(" debug = %s\n",
364 (lmb.debug) ? "TRUE" : "FALSE");
365 udbg_printf(" memory.cnt = %d\n",
367 udbg_printf(" memory.size = 0x%lx\n",
369 udbg_printf(" memory.lcd_size = 0x%lx\n",
370 lmb.memory.lcd_size);
/* One entry per memory region: base/physbase/size/type. */
371 for (i=0; i < lmb.memory.cnt ;i++) {
372 udbg_printf(" memory.region[%d].base = 0x%lx\n",
373 i, lmb.memory.region[i].base);
374 udbg_printf(" .physbase = 0x%lx\n",
375 lmb.memory.region[i].physbase);
376 udbg_printf(" .size = 0x%lx\n",
377 lmb.memory.region[i].size);
378 udbg_printf(" .type = 0x%lx\n",
379 lmb.memory.region[i].type);
/* Same layout for the reserved table. */
383 udbg_printf(" reserved.cnt = %d\n",
385 udbg_printf(" reserved.size = 0x%lx\n",
387 udbg_printf(" reserved.lcd_size = 0x%lx\n",
388 lmb.reserved.lcd_size);
389 for (i=0; i < lmb.reserved.cnt ;i++) {
390 udbg_printf(" reserved.region[%d].base = 0x%lx\n",
391 i, lmb.reserved.region[i].base);
392 udbg_printf(" .physbase = 0x%lx\n",
393 lmb.reserved.region[i].physbase);
394 udbg_printf(" .size = 0x%lx\n",
395 lmb.reserved.region[i].size);
396 udbg_printf(" .type = 0x%lx\n",
397 lmb.reserved.region[i].type);