X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Flinux%2Fmmzone.h;h=2f1544e83042af69d45862f216781de0fe22d9fe;hb=18d8362d517cb2bd97761294924fe6c2a6ee5e3c;hp=e339a7345f25ed88cce27e5e50544aa085a4a465;hpb=463e7c7cf9aaf95dd05e97e1a47854fdf5454cdc;p=powerpc.git

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e339a7345f..2f1544e830 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,15 +47,20 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+        /* First 128 byte cacheline (assuming 64 bit words) */
+        NR_FREE_PAGES,
+        NR_INACTIVE,
+        NR_ACTIVE,
         NR_ANON_PAGES,  /* Mapped anonymous pages */
         NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                            only modified from process context */
         NR_FILE_PAGES,
-        NR_SLAB_RECLAIMABLE,
-        NR_SLAB_UNRECLAIMABLE,
-        NR_PAGETABLE,   /* used for pagetables */
         NR_FILE_DIRTY,
         NR_WRITEBACK,
+        /* Second 128 byte cacheline */
+        NR_SLAB_RECLAIMABLE,
+        NR_SLAB_UNRECLAIMABLE,
+        NR_PAGETABLE,   /* used for pagetables */
         NR_UNSTABLE_NFS,        /* NFS unstable pages */
         NR_BOUNCE,
         NR_VMSCAN_WRITE,
@@ -91,6 +96,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
         /*
          * ZONE_DMA is used when there are devices that are not able
          * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -111,6 +117,7 @@ enum zone_type {
          *              <16M.
          */
         ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
         /*
          * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -147,15 +154,30 @@ enum zone_type {
  * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones.  Note that the use of defined(X) outside of
+ * #if and family is not necessarily defined, so ensure we cannot use
+ * it later.  Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (                  \
+          defined(CONFIG_ZONE_DMA)      \
+        + defined(CONFIG_ZONE_DMA32)    \
+        + 1                             \
+        + defined(CONFIG_HIGHMEM)       \
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured, adjust calculation
+#endif
+#undef __ZONE_COUNT
 
 struct zone {
         /* Fields commonly accessed by the page allocator */
-        unsigned long           free_pages;
         unsigned long           pages_min, pages_low, pages_high;
         /*
          * We don't know if the memory that we're going to allocate will be freeable
@@ -197,8 +219,6 @@ struct zone {
         struct list_head        inactive_list;
         unsigned long           nr_scan_active;
         unsigned long           nr_scan_inactive;
-        unsigned long           nr_active;
-        unsigned long           nr_inactive;
         unsigned long           pages_scanned;     /* since last reclaim */
         int                     all_unreclaimable; /* All pages pinned */
 
@@ -442,17 +462,19 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-                        unsigned long *free, struct pglist_data *pgdat);
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
                         unsigned long *free);
 void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                 int classzone_idx, int alloc_flags);
-
+enum memmap_context {
+        MEMMAP_EARLY,
+        MEMMAP_HOTPLUG,
+};
 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
-                                     unsigned long size);
+                                     unsigned long size,
+                                     enum memmap_context context);
 
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
@@ -519,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
         return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+        return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
@@ -758,6 +784,18 @@ void sparse_init(void);
 void memory_present(int nid, unsigned long start, unsigned long end);
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 
+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
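
A note on the __ZONE_COUNT/ZONES_SHIFT arithmetic above (editor's sketch, not part of the patch): inside an #if expression, defined(CONFIG_X) evaluates to 1 when the option is set and 0 otherwise, so __ZONE_COUNT is simply the number of compiled-in zones. Strictly, the C standard leaves defined() undefined when it is produced by macro expansion, which is why the comment hedges and the macro is #undef'd immediately after use. A hypothetical configuration works out as follows:

/*
 * Hypothetical .config: CONFIG_ZONE_DMA=y, CONFIG_HIGHMEM=y,
 * CONFIG_ZONE_DMA32 not set.  As evaluated by the preprocessor:
 *
 *     __ZONE_COUNT = 1      (ZONE_DMA)
 *                  + 0      (ZONE_DMA32)
 *                  + 1      (ZONE_NORMAL, always present)
 *                  + 1      (ZONE_HIGHMEM)
 *                  = 3
 *
 * 3 fails "< 2" and "<= 2" and lands in the "<= 4" branch, so
 * ZONES_SHIFT becomes 2: two bits are enough to encode any of the
 * (at most four) zone indices in page->flags.
 */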
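On the new memmap_context argument: it lets init_currently_empty_zone(), and the memmap initialisation beneath it, distinguish boot-time setup from memory hot-add. A sketch of the two kinds of call site, with variable names assumed for illustration rather than taken from this patch:

/* Sketch only; the surrounding code is not shown by this patch. */

/* Boot-time zone setup: */
err = init_currently_empty_zone(zone, zone_start_pfn, size,
                                MEMMAP_EARLY);

/* Memory hot-add path: */
err = init_currently_empty_zone(zone, phys_start_pfn, nr_pages,
                                MEMMAP_HOTPLUG);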
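Illustrative use of pfn_valid_within() (a hypothetical helper assuming a MAX_ORDER-aligned walker, not code from this patch): one pfn_valid() check validates the whole MAX_ORDER_NR_PAGES block up front, and the per-page check inside the loop compiles to (1), and so costs nothing, unless CONFIG_HOLES_IN_ZONE is set:

/*
 * Hypothetical helper: count valid pages in one MAX_ORDER block.
 * Assumes block_start_pfn is MAX_ORDER_NR_PAGES-aligned, so the
 * memmap for the block either exists in full or not at all except
 * under CONFIG_HOLES_IN_ZONE.
 */
static unsigned long count_valid_pages(unsigned long block_start_pfn)
{
        unsigned long pfn, count = 0;

        if (!pfn_valid(block_start_pfn))        /* validate the block once */
                return 0;

        for (pfn = block_start_pfn;
             pfn < block_start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
                /* Expands to (1) unless CONFIG_HOLES_IN_ZONE is set. */
                if (pfn_valid_within(pfn))
                        count++;
        }
        return count;
}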