X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fslob.c;h=c6933bc19bcd8b5cc5a6efd50035b99bfd007d47;hb=5351fb106a84d6ac584c2501e3b335093d38a58c;hp=542394184a58e6c825a3b24cc7dfe1ad0e6e4fba;hpb=cdb8355add9b1d87ecfcb58b12879897dc1e3e36;p=powerpc.git

diff --git a/mm/slob.c b/mm/slob.c
index 542394184a..c6933bc19b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -60,6 +60,8 @@ static DEFINE_SPINLOCK(slob_lock);
 static DEFINE_SPINLOCK(block_lock);
 
 static void slob_free(void *b, int size);
+static void slob_timer_cbk(void);
+
 
 static void *slob_alloc(size_t size, gfp_t gfp, int align)
 {
@@ -148,16 +150,7 @@ static void slob_free(void *block, int size)
 	spin_unlock_irqrestore(&slob_lock, flags);
 }
 
-static int FASTCALL(find_order(int size));
-static int fastcall find_order(int size)
-{
-	int order = 0;
-	for ( ; size > 4096 ; size >>=1)
-		order++;
-	return order;
-}
-
-void *kmalloc(size_t size, gfp_t gfp)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
 	slob_t *m;
 	bigblock_t *bb;
@@ -172,7 +165,7 @@ void *kmalloc(size_t size, gfp_t gfp)
 	if (!bb)
 		return 0;
 
-	bb->order = find_order(size);
+	bb->order = get_order(size);
 	bb->pages = (void *)__get_free_pages(gfp, bb->order);
 
 	if (bb->pages) {
@@ -186,8 +179,40 @@ void *kmalloc(size_t size, gfp_t gfp)
 	slob_free(bb, sizeof(bigblock_t));
 	return 0;
 }
+EXPORT_SYMBOL(__kmalloc);
+
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
 
-EXPORT_SYMBOL(kmalloc);
+	if (unlikely(!p))
+		return kmalloc_track_caller(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
 
 void kfree(const void *block)
 {
@@ -218,7 +243,7 @@ void kfree(const void *block)
 
 EXPORT_SYMBOL(kfree);
 
-unsigned int ksize(const void *block)
+size_t ksize(const void *block)
 {
 	bigblock_t *bb;
 	unsigned long flags;
@@ -261,10 +286,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
-	}
+	} else if (flags & SLAB_PANIC)
+		panic("Cannot create slab cache %s\n", name);
 
 	return c;
 }
@@ -283,7 +309,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align);
 	else
-		b = (void *)__get_free_pages(flags, find_order(c->size));
+		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
 		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
@@ -310,7 +336,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	if (c->size < PAGE_SIZE)
 		slob_free(b, c->size);
 	else
-		free_pages((unsigned long)b, find_order(c->size));
+		free_pages((unsigned long)b, get_order(c->size));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -327,9 +353,25 @@ const char *kmem_cache_name(struct kmem_cache *c)
 EXPORT_SYMBOL(kmem_cache_name);
 
 static struct timer_list slob_timer = TIMER_INITIALIZER(
-	(void (*)(unsigned long))kmem_cache_init, 0, 0);
+	(void (*)(unsigned long))slob_timer_cbk, 0, 0);
+
+int kmem_cache_shrink(struct kmem_cache *d)
+{
+	return 0;
+}
+EXPORT_SYMBOL(kmem_cache_shrink);
+
+int kmem_ptr_validate(struct kmem_cache *a, const void *b)
+{
+	return 0;
+}
+
+void __init kmem_cache_init(void)
+{
+	slob_timer_cbk();
+}
 
-void kmem_cache_init(void)
+static void slob_timer_cbk(void)
 {
 	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
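
Two of the changes above are worth a short illustration. First, the removed find_order() helper hard-wired a 4096-byte page ("for ( ; size > 4096 ; size >>=1)"), while the generic get_order() that replaces it rounds the request up to the smallest power-of-two number of PAGE_SIZE pages. The standalone user-space sketch below only mirrors that rounding behaviour; order_of() and the 4 KB PAGE_SHIFT are assumptions for the example, not the kernel implementation.

#include <stdio.h>

#define PAGE_SHIFT 12			/* example assumes 4 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* order_of(): hypothetical stand-in for the kernel's get_order() --
 * the smallest order such that (PAGE_SIZE << order) covers the request. */
static int order_of(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 8192, 100000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("size %6lu -> order %d\n", sizes[i], order_of(sizes[i]));
	return 0;
}

With 4 KB pages this prints order 0 for sizes up to 4096, order 1 for 4097 through 8192, and so on, which is the value bb->order now records for big-block allocations in __kmalloc() above.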
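
Second, the krealloc() added above documents a specific contract: a %NULL pointer behaves like a plain allocation, a new size of 0 frees the object and returns %NULL, and otherwise the contents are copied up to the lesser of the old and new sizes before the old object is freed. A minimal user-space sketch of that allocate-copy-free pattern follows; krealloc_like() is a hypothetical helper that takes the old size explicitly where the kernel code queries ksize(p), and it is not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical user-space mirror of the krealloc() logic added above.
 * The kernel version discovers the old size with ksize(); here it is
 * passed in explicitly. */
static void *krealloc_like(void *p, size_t old_size, size_t new_size)
{
	void *ret;

	if (!p)				/* NULL: behave like a plain allocation */
		return malloc(new_size);

	if (!new_size) {		/* new size 0: free the object, return NULL */
		free(p);
		return NULL;
	}

	ret = malloc(new_size);
	if (ret) {			/* copy up to the lesser of the two sizes */
		memcpy(ret, p, new_size < old_size ? new_size : old_size);
		free(p);
	}
	return ret;
}

int main(void)
{
	char *buf = krealloc_like(NULL, 0, 16);	/* NULL: acts like malloc(16) */

	if (!buf)
		return 1;
	strcpy(buf, "slob");
	buf = krealloc_like(buf, 16, 64);	/* grow: contents preserved */
	if (!buf)
		return 1;
	printf("%s\n", buf);
	buf = krealloc_like(buf, 64, 0);	/* new size 0: frees, returns NULL */
	return 0;
}

As in the hunk above, the old object is left untouched if the new allocation fails.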