hugetlb: introduce generic version of prepare_hugepage_range
author		Alexandre Ghiti <alex@ghiti.fr>
		Fri, 26 Oct 2018 22:08:31 +0000 (15:08 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 26 Oct 2018 23:26:34 +0000 (16:26 -0700)
The arm, arm64, powerpc, sparc and x86 architectures use the same
version of prepare_hugepage_range(), so move this generic
implementation into asm-generic/hugetlb.h.
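
For readers who don't have the rest of the series at hand, the sketch
below shows the override convention being introduced here; the
architecture header is hypothetical and not part of this patch.  An
architecture that needs its own checks defines
__HAVE_ARCH_PREPARE_HUGEPAGE_RANGE before the generic header is pulled
in; otherwise the #ifndef-guarded default from asm-generic/hugetlb.h
applies.

    /* arch/<arch>/include/asm/hugetlb.h (hypothetical) */
    #define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
    static inline int prepare_hugepage_range(struct file *file,
                                             unsigned long addr,
                                             unsigned long len)
    {
            /* arch-specific constraints would be checked here */
            return 0;
    }

    /* Included last, so the #ifndef guard in the generic header sees
     * the macro above and skips its default definition. */
    #include <asm-generic/hugetlb.h>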

Link: http://lkml.kernel.org/r/20180920060358.16606-9-alex@ghiti.fr
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Luiz Capitulino <lcapitulino@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Helge Deller <deller@gmx.de> [parisc]
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Ingo Molnar <mingo@kernel.org> [x86]
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arm/include/asm/hugetlb.h
arch/arm64/include/asm/hugetlb.h
arch/ia64/include/asm/hugetlb.h
arch/mips/include/asm/hugetlb.h
arch/parisc/include/asm/hugetlb.h
arch/powerpc/include/asm/hugetlb.h
arch/sh/include/asm/hugetlb.h
arch/sparc/include/asm/hugetlb.h
arch/x86/include/asm/hugetlb.h
include/asm-generic/hugetlb.h

diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index 9ca1422..3fcef21 100644
@@ -33,17 +33,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
        return 0;
 }
 
-static inline int prepare_hugepage_range(struct file *file,
-                                        unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
        clear_bit(PG_dcache_clean, &page->flags);
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1fd64eb..3e7f6e6 100644
@@ -31,17 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
        return 0;
 }
 
-static inline int prepare_hugepage_range(struct file *file,
-                                        unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
        clear_bit(PG_dcache_clean, &page->flags);
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 82fe3d7..cbe2962 100644
@@ -9,6 +9,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);
 
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len);
 
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index b3d6bb5..6ff2531 100644
@@ -18,6 +18,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
        return 0;
 }
 
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr,
                                         unsigned long len)
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index 5a102d7..fb7e0fd 100644
@@ -22,6 +22,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
 {
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index b5b57b3..2a90f38 100644
@@ -114,21 +114,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index 54f6509..f1bbd25 100644
@@ -15,6 +15,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
 {
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index f661362..2101ea2 100644
@@ -26,22 +26,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
        return 0;
 }
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 3cd3a2c..59c056a 100644
@@ -13,21 +13,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
        return 0;
 }
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-                       unsigned long addr, unsigned long len)
-{
-       struct hstate *h = hstate_file(file);
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (addr & ~huge_page_mask(h))
-               return -EINVAL;
-       return 0;
-}
-
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
 {
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index cd96976..6c0c8b0 100644
@@ -87,4 +87,19 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 }
 #endif
 
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+               unsigned long addr, unsigned long len)
+{
+       struct hstate *h = hstate_file(file);
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (addr & ~huge_page_mask(h))
+               return -EINVAL;
+
+       return 0;
+}
+#endif
+
 #endif /* _ASM_GENERIC_HUGETLB_H */
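
As a standalone illustration of what this generic check enforces (plain
userspace C, not kernel code; the 2 MiB huge page size and the helper
name are assumptions for the example), both the start address and the
length of a mapping must be multiples of the huge page size:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for huge_page_mask(h), assuming 2 MiB huge pages. */
    #define HPAGE_SIZE      (2UL << 20)
    #define HPAGE_MASK      (~(HPAGE_SIZE - 1))

    /* Mirrors the alignment check done by the generic
     * prepare_hugepage_range() above. */
    static int check_range(unsigned long addr, unsigned long len)
    {
            if (len & ~HPAGE_MASK)
                    return -EINVAL;  /* length is not a huge page multiple */
            if (addr & ~HPAGE_MASK)
                    return -EINVAL;  /* address is not huge page aligned */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_range(0x40000000UL, 4UL << 20)); /* 0 */
            printf("%d\n", check_range(0x40001000UL, 4UL << 20)); /* -EINVAL */
            printf("%d\n", check_range(0x40000000UL, 3UL << 20)); /* -EINVAL */
            return 0;
    }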