From: venkatesh.pallipadi@intel.com
Date: Sat, 10 Jan 2009 00:13:11 +0000 (-0800)
Subject: x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
X-Git-Tag: v2.6.29-rc2~84^2~2
X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=e4b866ed197cef9989348e0479fed8d864ea465b;p=~shefty%2Frdma-dev.git

x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param

Impact: cleanup

Change the protection parameter for track_pfn_vma_new() into a pgprot_t
pointer. A subsequent patch changes the x86 PAT handling to return a
compatible memtype in pgprot_t, if what was requested cannot be allowed
due to conflicts.

No functionality change in this patch.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
Signed-off-by: Ingo Molnar
---
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd372..f88ac80530c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -741,7 +741,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, *prot);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
 		if (retval)
 			goto cleanup_ret;
 	}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91005a..8e6d0ca70ab 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 					unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/mm/memory.c b/mm/memory.c
index d3ee2ea5615..22bfa7a47a0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;
 
-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
 	if (err) {
 		/*
 		 * To indicate that track_pfn related cleanup is not
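
For illustration only (not part of the commit): the point of taking pgprot_t by pointer is that the tracking call can hand an adjusted protection value back to its caller, which then uses that value when the mapping is actually inserted, as the vm_insert_pfn() hunk above already does with its local pgprot copy. Below is a minimal, user-space C sketch of that calling pattern; every name in it (prot_t, my_track_pfn, my_insert_pfn, PROT_*) is a hypothetical stand-in, not a kernel API.

/*
 * Simplified, user-space sketch of the calling pattern introduced by this
 * patch: the caller copies its default protection into a local variable and
 * passes it by pointer, so the tracking step may rewrite it before the
 * mapping is established.  All names here are hypothetical stand-ins.
 */
#include <stdio.h>

typedef unsigned long prot_t;            /* stand-in for pgprot_t */

#define PROT_DEFAULT   0x1UL             /* stand-in for vma->vm_page_prot */
#define PROT_UNCACHED  0x2UL             /* stand-in for a compatible memtype */

/* Stand-in for track_pfn_vma_new(): may rewrite *prot with a compatible type. */
static int my_track_pfn(unsigned long pfn, prot_t *prot)
{
	if (pfn >= 0x100000UL)          /* pretend this range has a conflicting type */
		*prot = PROT_UNCACHED;  /* hand back the type that can be allowed */
	return 0;                       /* 0 on success, non-zero on failure */
}

/* Stand-in for insert_pfn(): consumes whatever protection the caller ended up with. */
static int my_insert_pfn(unsigned long pfn, prot_t prot)
{
	printf("mapping pfn 0x%lx with prot 0x%lx\n", pfn, prot);
	return 0;
}

int main(void)
{
	prot_t prot = PROT_DEFAULT;     /* local copy, like pgprot in vm_insert_pfn() */

	if (my_track_pfn(0x123456UL, &prot))
		return 1;               /* the kernel returns -EINVAL here */

	return my_insert_pfn(0x123456UL, prot); /* uses the possibly-adjusted prot */
}

Note how the caller passes a local copy rather than its stored default, so an adjusted value affects only the single mapping being set up; this mirrors the vm_insert_pfn() change above, which copies vma->vm_page_prot into a local pgprot before calling track_pfn_vma_new().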