[POWERPC] Update documentation for flat device tree format v17
diff --git a/mm/slab.c b/mm/slab.c
index b595323..57f7aa4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -793,8 +793,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 * has cs_{dma,}cachep==NULL. Thus no special case
 	 * for large kmalloc calls required.
 	 */
+#ifdef CONFIG_ZONE_DMA
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
+#endif
 	return csizep->cs_cachep;
 }
@@ -1040,7 +1042,7 @@ static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
+	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)
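Note: nr_node_ids records, at runtime, the highest possible node id plus one, so sizing the alien-cache pointer array with it instead of the compile-time MAX_NUMNODES stops allocating slots for nodes this machine can never have. A minimal user-space sketch of the saving, with made-up numbers rather than the kernel's values:

#include <stdio.h>

#define MAX_NUMNODES 64			/* assumed compile-time ceiling */

int main(void)
{
	int nr_node_ids = 2;		/* assumed: highest possible node id + 1 */

	printf("compile-time: %zu bytes, runtime: %zu bytes\n",
	       sizeof(void *) * MAX_NUMNODES,
	       sizeof(void *) * nr_node_ids);
	return 0;
}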
@@ -1493,13 +1495,15 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-
-		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+#ifdef CONFIG_ZONE_DMA
+		sizes->cs_dmacachep = kmem_cache_create(
+					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL, NULL);
+#endif
 		sizes++;
 		names++;
 	}
@@ -2321,7 +2325,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
-	if (flags & SLAB_CACHE_DMA)
+	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
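Note: CONFIG_ZONE_DMA_FLAG is defined as a constant 1 or 0 depending on CONFIG_ZONE_DMA, so on !ZONE_DMA builds the compiler folds the test above away entirely while the branch body still gets parsed and type-checked, which a bare #ifdef would not provide. A user-space sketch of the pattern, with stand-in flag values that are not the kernel's:

#include <stdio.h>

#define CONFIG_ZONE_DMA_FLAG	0	/* assumed: 1 when ZONE_DMA is configured */
#define SLAB_CACHE_DMA		0x4000UL	/* stand-in value */
#define GFP_DMA			0x01UL		/* stand-in value */

static unsigned long dma_gfpflags(unsigned long flags)
{
	unsigned long gfpflags = 0;

	/* Constant-folds to nothing when CONFIG_ZONE_DMA_FLAG is 0,
	 * but the dead branch is still compiled and type-checked. */
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		gfpflags |= GFP_DMA;
	return gfpflags;
}

int main(void)
{
	printf("gfpflags = %#lx\n", dma_gfpflags(SLAB_CACHE_DMA));
	return 0;
}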
@@ -2516,7 +2520,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a struct kmem_cache object from the slab cache.
+ * Remove a &struct kmem_cache object from the slab cache.
  *
  * It is expected this function will be called by a module when it is
  * unloaded. This will remove the cache completely, and avoid a duplicate
@@ -2643,10 +2647,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (flags & GFP_DMA)
-		BUG_ON(!(cachep->gfpflags & GFP_DMA));
-	else
-		BUG_ON(cachep->gfpflags & GFP_DMA);
+	if (CONFIG_ZONE_DMA_FLAG) {
+		if (flags & GFP_DMA)
+			BUG_ON(!(cachep->gfpflags & GFP_DMA));
+		else
+			BUG_ON(cachep->gfpflags & GFP_DMA);
+	}
 }
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
@@ -3745,6 +3751,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
+	debug_check_no_locks_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
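Note: debug_check_no_locks_freed() is lockdep's free-time sanity hook; it warns if the object being released still contains a lock that is currently held, so calling it before __cache_free() catches lock-lifetime bugs at the point of free, matching the checking done on other freeing paths.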
@@ -4019,7 +4026,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 /**
  * cache_reap - Reclaim memory from caches.
- * @unused: unused parameter
+ * @w: work descriptor
  *
  * Called from workqueue/eventd every few seconds.
  * Purpose:
@@ -4029,18 +4036,17 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(struct work_struct *unused)
+static void cache_reap(struct work_struct *w)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	int node = numa_node_id();
+	struct delayed_work *work =
+		container_of(w, struct delayed_work, work);
 
-	if (!mutex_trylock(&cache_chain_mutex)) {
+	if (!mutex_trylock(&cache_chain_mutex))
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work),
-				      round_jiffies_relative(REAPTIMEOUT_CPUC));
-		return;
-	}
+		goto out;
 
 	list_for_each_entry(searchp, &cache_chain, next) {
 		check_irq_on();
@@ -4083,9 +4089,9 @@ next:
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
 	refresh_cpu_vm_stats(smp_processor_id());
+out:
 	/* Set up the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work),
-			round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
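Note: with the work_struct conversion, cache_reap() no longer reaches for the per-CPU reap_work directly; it recovers its own delayed_work from the callback argument via container_of() and reschedules that, and the two reschedule sites collapse into the single out: label. A minimal user-space sketch of the container_of idiom this relies on, using simplified stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct delayed_work {
	struct work_struct work;	/* embedded member passed to the callback */
	long timer;			/* stand-in for the real timer field */
};

static void reap_sketch(struct work_struct *w)
{
	/* Recover the enclosing delayed_work from the embedded member. */
	struct delayed_work *dw = container_of(w, struct delayed_work, work);

	printf("timer = %ld\n", dw->timer);
}

int main(void)
{
	struct delayed_work dw = { .work = { 0 }, .timer = 42 };

	reap_sketch(&dw.work);		/* prints "timer = 42" */
	return 0;
}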