mm/huge_memory.c: use NUMA_NO_NODE, account THP faults to memcg, drop debugfs_create_file() return check
[linux] / mm / huge_memory.c
index faf357e..d484702 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
 #include <linux/oom.h>
+#include <linux/numa.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -616,6 +617,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                mm_inc_nr_ptes(vma->vm_mm);
                spin_unlock(vmf->ptl);
                count_vm_event(THP_FAULT_ALLOC);
+               count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
        }
 
        return 0;
@@ -1337,6 +1339,7 @@ alloc:
        }
 
        count_vm_event(THP_FAULT_ALLOC);
+       count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
 
        if (!page)
                clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
@@ -1475,7 +1478,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
        struct anon_vma *anon_vma = NULL;
        struct page *page;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       int page_nid = -1, this_nid = numa_node_id();
+       int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
        int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
@@ -1520,7 +1523,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
         */
        page_locked = trylock_page(page);
        target_nid = mpol_misplaced(page, vma, haddr);
-       if (target_nid == -1) {
+       if (target_nid == NUMA_NO_NODE) {
                /* If the page was locked, there are no parallel migrations */
                if (page_locked)
                        goto clear_pmdnuma;
@@ -1528,7 +1531,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 
        /* Migration could have started since the pmd_trans_migrating check */
        if (!page_locked) {
-               page_nid = -1;
+               page_nid = NUMA_NO_NODE;
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
@@ -1549,14 +1552,14 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
        if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
                unlock_page(page);
                put_page(page);
-               page_nid = -1;
+               page_nid = NUMA_NO_NODE;
                goto out_unlock;
        }
 
        /* Bail if we fail to protect against THP splits for any reason */
        if (unlikely(!anon_vma)) {
                put_page(page);
-               page_nid = -1;
+               page_nid = NUMA_NO_NODE;
                goto clear_pmdnuma;
        }
 
@@ -1618,7 +1621,7 @@ out:
        if (anon_vma)
                page_unlock_anon_vma_read(anon_vma);
 
-       if (page_nid != -1)
+       if (page_nid != NUMA_NO_NODE)
                task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
                                flags);
 
@@ -2886,12 +2889,8 @@ DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
 
 static int __init split_huge_pages_debugfs(void)
 {
-       void *ret;
-
-       ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
-                       &split_huge_pages_fops);
-       if (!ret)
-               pr_warn("Failed to create split_huge_pages in debugfs");
+       debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
+                           &split_huge_pages_fops);
        return 0;
 }
 late_initcall(split_huge_pages_debugfs);