Merge tag 'drm-vmwgfx-coherent-2019-11-29' of git://anongit.freedesktop.org/drm/drm

Pull drm coherent memory support for vmwgfx from Dave Airlie:
 "This is a separate pull for the mm pagewalking + drm/vmwgfx work
  Thomas did and you were involved in; I've left it separate in case you
  don't feel as comfortable with it as the other stuff.

  It has mm acks/r-b in the right places from what I can see"

* tag 'drm-vmwgfx-coherent-2019-11-29' of git://anongit.freedesktop.org/drm/drm:
  drm/vmwgfx: Add surface dirty-tracking callbacks
  drm/vmwgfx: Implement an infrastructure for read-coherent resources
  drm/vmwgfx: Use an RBtree instead of linked list for MOB resources
  drm/vmwgfx: Implement an infrastructure for write-coherent resources
  mm: Add write-protect and clean utilities for address space ranges
  mm: Add a walk_page_mapping() function to the pagewalk code
  mm: pagewalk: Take the pagetable lock in walk_pte_range()
  mm: Remove BUG_ON mmap_sem not held from xxx_trans_huge_lock()
  drm/ttm: Convert vm callbacks to helpers
  drm/ttm: Remove explicit typecasts of vm_private_data
torvalds committed Nov 30, 2019
2 parents 81b6b96 + 0a6cad5 commit d5bb349
Showing 23 changed files with 1,996 additions and 123 deletions.
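Before the diffs, a brief orientation on the mm helpers named in the shortlog above: the series adds write-protect and clean-record utilities for shared address-space ranges (CONFIG_MAPPING_DIRTY_HELPERS, selected by vmwgfx below), built on the new walk_page_mapping() pagewalk. The following is a minimal driver-side sketch, not part of this commit; the helper signatures are given as I read them from the series, and the wrapper names and parameters are hypothetical.

#include <linux/mm.h>

/*
 * Hypothetical driver-side sketch, not part of this diff. Assumes the
 * helpers added by "mm: Add write-protect and clean utilities for
 * address space ranges" (CONFIG_MAPPING_DIRTY_HELPERS).
 */
static void start_dirty_tracking(struct address_space *mapping,
				 pgoff_t offset, pgoff_t num_pages)
{
	/* Write-protect the shared PTEs so the next write triggers a fault. */
	wp_shared_mapping_range(mapping, offset, num_pages);
}

static void collect_dirty_pages(struct address_space *mapping,
				pgoff_t offset, pgoff_t num_pages,
				unsigned long *bitmap,
				pgoff_t *start, pgoff_t *end)
{
	*start = num_pages;	/* empty range; the helper widens it (assumption) */
	*end = 0;

	/*
	 * Clean dirty PTEs in [offset, offset + num_pages), set the matching
	 * bits in @bitmap (bit 0 corresponding to @offset), and update
	 * *start/*end to bound the pages that were found dirty.
	 */
	clean_record_shared_mapping_range(mapping, offset, num_pages,
					  offset, bitmap, start, end);
}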
174 changes: 107 additions & 67 deletions drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -106,25 +104,30 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}

static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
/**
* ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
* @bo: The buffer object
* @vmf: The fault structure handed to the callback
*
* vm callbacks such as fault() and *_mkwrite() allow the mmap_sem to be dropped
* during long waits, and the callback is restarted after the wait. This lets
* other threads sharing the same virtual memory space concurrently map() and
* unmap() completely unrelated buffer objects. TTM buffer object reservations
* sometimes wait for the GPU and should therefore be considered long waits.
* This function reserves the buffer object interruptibly, taking that into
* account. Starvation is avoided because the vm system does not allow too
* many repeated restarts.
* This function is intended to be used in customized fault() and _mkwrite()
* handlers.
*
* Return:
* 0 on success and the bo was reserved.
* VM_FAULT_RETRY if blocking wait.
* VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
*/
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
struct ttm_tt *ttm = NULL;
struct page *page;
int err;
int i;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
struct vm_area_struct cvma;

/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -151,14 +154,54 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}

return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
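
As the kerneldoc notes, customized _mkwrite() handlers are the other intended caller of ttm_bo_vm_reserve(). A rough, hypothetical sketch of such a handler follows; it is not part of this diff, and the bookkeeping step is a placeholder (vmwgfx installs its own handlers elsewhere in this series).

/* Hypothetical mkwrite handler built on ttm_bo_vm_reserve(); a sketch only. */
static vm_fault_t my_driver_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* May return VM_FAULT_RETRY or VM_FAULT_NOPAGE; only 0 means reserved. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* ... driver-specific dirty bookkeeping for the written page ... */

	dma_resv_unlock(bo->base.resv);
	return 0;	/* let the core mm proceed with the write */
}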

/**
* ttm_bo_vm_fault_reserved - TTM fault helper
* @vmf: The struct vm_fault given as argument to the fault callback
* @prot: The page protection to be used for this memory area.
* @num_prefault: Maximum number of prefault pages. The caller may want to
* specify this based on madvise settings and the size of the GPU object
* backed by the memory.
*
* This function inserts one or more page table entries pointing to the
* memory backing the buffer object, and then returns a return code
* instructing the caller to retry the page access.
*
* Return:
* VM_FAULT_NOPAGE on success or pending signal
* VM_FAULT_SIGBUS on unspecified error
* VM_FAULT_OOM on out-of-memory
* VM_FAULT_RETRY if retryable wait
*/
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
pgoff_t num_prefault)
{
struct vm_area_struct *vma = vmf->vma;
struct vm_area_struct cvma = *vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
struct ttm_tt *ttm = NULL;
struct page *page;
int err;
pgoff_t i;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];

/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
return VM_FAULT_SIGBUS;

if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -169,11 +212,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
ret = VM_FAULT_NOPAGE;
goto out_unlock;
return VM_FAULT_NOPAGE;
default:
ret = VM_FAULT_SIGBUS;
goto out_unlock;
return VM_FAULT_SIGBUS;
}

if (bo->moving != moving) {
@@ -189,21 +230,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
if (unlikely(ret != 0)) {
if (ret == VM_FAULT_RETRY &&
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* The BO has already been unreserved. */
return ret;
}

goto out_unlock;
}
if (unlikely(ret != 0))
return ret;

err = ttm_mem_io_lock(man, true);
if (unlikely(err != 0)) {
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
if (unlikely(err != 0))
return VM_FAULT_NOPAGE;
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -220,18 +252,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_io_unlock;
}

/*
* Make a local vma copy to modify the page_prot member
* and vm_flags if necessary. The vma parameter is protected
* by mmap_sem in write mode.
*/
cvma = *vma;
cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

if (bo->mem.bus.is_iomem) {
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
} else {
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -240,24 +262,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
};

ttm = bo->ttm;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);

/* Allocate all pages at once, most common usage */
if (ttm_tt_populate(ttm, &ctx)) {
if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
} else {
/* Iomem should not be marked encrypted */
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
}

/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
/* Iomem should not be marked encrypted */
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -293,28 +312,49 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
out_unlock:
return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
vm_fault_t ret;

ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;

prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;

dma_resv_unlock(bo->base.resv);

return ret;
}
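
ttm_bo_vm_fault() above is the stock sequence: reserve, fault with the default prefault count, then unlock. A driver doing per-page dirty tracking can supply its own handler on top of the same two helpers, for instance with a smaller prefault window; the sketch below is illustrative only, and the handler name and single-page prefault choice are assumptions rather than vmwgfx code.

/* Illustrative custom fault handler; not part of this diff. */
static vm_fault_t my_driver_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Prefault a single page so per-page dirty tracking stays precise. */
	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* reservation already dropped on this path */

	dma_resv_unlock(bo->base.resv);
	return ret;
}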

static void ttm_bo_vm_open(struct vm_area_struct *vma)
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo =
(struct ttm_buffer_object *)vma->vm_private_data;
struct ttm_buffer_object *bo = vma->vm_private_data;

WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

static void ttm_bo_vm_close(struct vm_area_struct *vma)
void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
struct ttm_buffer_object *bo = vma->vm_private_data;

ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
1 change: 1 addition & 0 deletions drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,6 +8,7 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
select FB
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
2 changes: 1 addition & 1 deletion drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o \
vmwgfx_validation.o vmwgfx_page_dirty.o \
ttm_object.o ttm_lock.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
[Diffs for the remaining 20 changed files are not shown here.]
