X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Flinux%2Fpagemap.h;h=c3e255bf859487aee64d3de7b5b34bf9d23e002a;hb=d5112a4f31a361409d3c57dc9d58dd69f8014bef;hp=1245df7141aa6c6c1a973ac429ef417fa70488e9;hpb=62ed948cb1405fe95d61d8c6445c102e0c9da0a6;p=powerpc.git

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141..c3e255bf85 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -52,19 +52,23 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 void release_pages(struct page **pages, int nr, int cold);
 
 #ifdef CONFIG_NUMA
-extern struct page *page_cache_alloc(struct address_space *x);
-extern struct page *page_cache_alloc_cold(struct address_space *x);
+extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+	return alloc_pages(gfp, 0);
+}
+#endif
+
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return __page_cache_alloc(mapping_gfp_mask(x));
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
-#endif
 
 typedef int filler_t(void *, struct page *);
 
@@ -113,51 +117,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -175,14 +134,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
 	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		__lock_page_nosync(page);
+}
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
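
With this change the CONFIG_NUMA branch of the header carries only the extern declaration of __page_cache_alloc(), so the NUMA-aware allocation policy has to live out of line. A minimal sketch of what that out-of-line definition might look like follows; the cpuset page-spreading logic is an assumption about the mm/filemap.c side of the series and is not part of the hunk shown above.

#ifdef CONFIG_NUMA
/*
 * Sketch of the out-of-line allocator declared in pagemap.h.
 * Spreading page cache pages across the cpuset's allowed nodes is
 * assumed here; only the declaration appears in this header diff.
 */
struct page *__page_cache_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

Either way, page_cache_alloc() and page_cache_alloc_cold() now funnel through the single __page_cache_alloc() entry point instead of calling alloc_pages() directly, so the per-mapping gfp mask and the __GFP_COLD hint are applied in one place.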
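
The new lock_page_nosync() variant is for callers that hold a reference on the page but cannot guarantee the backing inode stays pinned: plain lock_page() may end up calling the mapping's sync_page() (block device plugging) and so dereference page->mapping. A plausible caller, sketched as an assumption about the rest of this series rather than something shown in the diff, is set_page_dirty_lock():

/*
 * Sketch: dirty a page when the caller cannot pin the backing inode.
 * The non-syncing lock avoids touching page->mapping's sync_page()
 * plugging machinery while the inode may be going away.
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}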