Skip to content

Commit

Permalink
[PATCH] add vm_insert_pfn()
Browse files Browse the repository at this point in the history
Add a vm_insert_pfn helper, so that ->fault handlers can have nopfn
functionality by installing their own pte and returning NULL.

Signed-off-by: Nick Piggin <[email protected]>
Signed-off-by: Benjamin Herrenschmidt <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Nick Piggin authored and Linus Torvalds committed Feb 12, 2007
1 parent 2ca48ed commit e0dc0d8
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 0 deletions.
2 changes: 2 additions & 0 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -1124,6 +1124,8 @@ unsigned long vmalloc_to_pfn(void *addr);
/* Map a range of physical memory (given by pfn and size) into a user vma. */
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
/* Insert a single struct page into a user vma at addr. */
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
/*
 * Insert a single raw pfn (no struct page) into a user vma at addr;
 * intended for ->fault handlers of VM_PFNMAP mappings.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);

/* Look up the page mapped at address in the given vma, honouring foll_flags. */
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
Expand Down
45 changes: 45 additions & 0 deletions mm/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -1277,6 +1277,51 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
}
EXPORT_SYMBOL(vm_insert_page);

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * Returns 0 on success, -ENOMEM if the page table cannot be allocated,
 * or -EBUSY if a pte is already present at @addr.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte, entry;
spinlock_t *ptl;

/*
 * Raw-pfn insertion is only valid for pure PFN mappings, and never
 * for COW mappings — there is no struct page here for the core VM
 * to copy or refcount.
 */
BUG_ON(!(vma->vm_flags & VM_PFNMAP));
BUG_ON(is_cow_mapping(vma->vm_flags));

/* Allocate (if needed) and lock the pte for @addr; NULL means OOM. */
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
/* Don't overwrite an existing mapping at this address. */
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;

/* Ok, finally just insert the thing.. */
entry = pfn_pte(pfn, vma->vm_page_prot);
set_pte_at(mm, addr, pte, entry);
/* Let the architecture update any MMU caches for the new translation. */
update_mmu_cache(vma, addr, entry);

retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);

out:
return retval;
}
EXPORT_SYMBOL(vm_insert_pfn);

/*
* maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results
Expand Down

0 comments on commit e0dc0d8

Please sign in to comment.