~ubuntu-branches/ubuntu/quantal/linux-linaro-vexpress/quantal

Viewing changes to mm/huge_memory.c

  • Committer: Bazaar Package Importer
  • Author(s): John Rigby
  • Date: 2011-03-18 07:36:33 UTC
  • mfrom: (5.1.1 upstream)
  • Revision ID: james.westby@ubuntu.com-20110318073633-tqfe9391ct71zb2p
Tags: 2.6.38-1001.2
[ John Rigby ]

* Rebase to new upstreams:
  Linux v2.6.38-rc6 -- same
  linaro-linux-2.6.38-upstream-1Mar2011 -- new
  Ubuntu-2.6.38-5.32 -- same
  - LP: #724377
* Enable CONFIG_THUMB2_KERNEL for OMAP[34]
* Bump ABI
* Rebase to new upstreams:
  Linux v2.6.38-rc7
  linaro-linux-2.6.38-upstream-4Mar2011
  ubuntu-natty master-next as of 4Mar2011
* Re-enable display on OMAP4
* Disable CONFIG_OMAP2_DSS_SDI
  - LP: #728603
  - LP: #720055
* Rebase to new upstreams:
  Linux v2.6.38-rc8
  linaro-linux-2.6.38-upstream-9Mar2011
    rebased to 2.6.38-rc8
* Remove generated file kernel-versions and sort kernel-versions.in
* Enable CONFIG_TIMER_STATS
  - LP: #718677
* Rebase to new upstreams:
  Linux v2.6.38 final
  linaro-linux-2.6.38-upstream-16Mar2011
  - LP: #708883
  - LP: #723159
  ubuntu-natty Ubuntu-2.6.38-7.35
* Enable CONFIG_IP_PNP and CONFIG_ROOT_NFS for all flavours
  - LP: #736429
* mach-ux500: fix build error
  works around a problem in linux-linaro-2.6.38
* OMAP4: Fix -EINVAL for vana, vcxio, vdac
  from the omap-linux mailing list, pending ack
* Turn off ROOT_NFS for mx51
  it makes the kernel too large to boot with current hwpack settings

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -650,10 +650,10 @@
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                               struct vm_area_struct *vma,
-                                              unsigned long haddr)
+                                              unsigned long haddr, int nd)
 {
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-                              HPAGE_PMD_ORDER, vma, haddr);
+                              HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                         vma, haddr);
+                                         vma, haddr, numa_node_id());
                if (unlikely(!page))
                        goto out;
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                         vma, address);
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+                                              vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
                                                       GFP_KERNEL))) {
@@ -902,7 +902,7 @@
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                             vma, haddr);
+                                             vma, haddr, numa_node_id());
        else
                new_page = NULL;
 
@@ -1745,7 +1745,8 @@
 static void collapse_huge_page(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage,
-                              struct vm_area_struct *vma)
+                              struct vm_area_struct *vma,
+                              int node)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -1761,6 +1762,10 @@
 #ifndef CONFIG_NUMA
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
+       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+               up_read(&mm->mmap_sem);
+               return;
+       }
 #else
        VM_BUG_ON(*hpage);
        /*
@@ -1773,18 +1778,19 @@
         * mmap_sem in read mode is good idea also to allow greater
         * scalability.
         */
-       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+                                     node);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
-#endif
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
                return;
        }
+#endif
 
        /* after allocating the hugepage upgrade to mmap_sem write mode */
        up_read(&mm->mmap_sem);
@@ -1919,6 +1925,7 @@
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
+       int node = -1;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1949,6 +1956,13 @@
                page = vm_normal_page(vma, _address, pteval);
                if (unlikely(!page))
                        goto out_unmap;
+               /*
+                * Chose the node of the first page. This could
+                * be more sophisticated and look at more pages,
+                * but isn't for now.
+                */
+               if (node == -1)
+                       node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
@@ -1965,7 +1979,7 @@
        pte_unmap_unlock(pte, ptl);
        if (ret)
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, vma);
+               collapse_huge_page(mm, address, hpage, vma, node);
 out:
        return ret;
 }
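
Taken together, the hunks above thread a NUMA node hint from each caller down to the page allocator instead of always allocating on the policy-default node. The following sketch collects the new call sites from the diff for readability; it is condensed 2.6.38-era kernel-internal code, not a standalone compilable unit, and the surrounding context (variables such as page, pages[i], node) is as in the functions shown above:

    /* Sketch: the node-aware allocation pattern introduced by this diff. */

    /* alloc_hugepage_vma() now takes the target node 'nd' and forwards it. */
    static inline struct page *alloc_hugepage_vma(int defrag,
                                                  struct vm_area_struct *vma,
                                                  unsigned long haddr, int nd)
    {
            return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
                                   HPAGE_PMD_ORDER, vma, haddr, nd);
    }

    /* Fault and COW huge-page paths: allocate on the faulting CPU's node. */
    page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                              vma, haddr, numa_node_id());

    /* COW fallback to small pages: stay on the original page's node. */
    pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
                                   vma, address, page_to_nid(page));

    /* khugepaged: remember the node of the first mapped page scanned,
     * then collapse the whole range onto that node. */
    if (node == -1)
            node = page_to_nid(page);
    collapse_huge_page(mm, address, hpage, vma, node);

The node choice in khugepaged is deliberately simple: as the in-diff comment notes, it takes the node of the first page it sees rather than examining every page in the range.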