Skip to content

Commit

Permalink
drm/gem: Use struct dma_buf_map in GEM vmap ops and convert GEM backends
Browse files Browse the repository at this point in the history
This patch replaces the vmap/vunmap's use of raw pointers in GEM object
functions with instances of struct dma_buf_map. GEM backends are
converted as well. For most of them, this simply changes the returned type.

TTM-based drivers now return information about the location of the memory,
either system or I/O memory. GEM VRAM helpers and qxl now use ttm_bo_vmap()
et al. Amdgpu, nouveau and radeon use drm_gem_ttm_vmap() et al instead of
implementing their own vmap callbacks.

v7:
	* init QXL cursor to mapped BO buffer (kernel test robot)
v5:
	* update vkms after switch to shmem
v4:
	* use ttm_bo_vmap(), drm_gem_ttm_vmap(), et al. (Daniel, Christian)
	* fix a trailing { in drm_gem_vmap()
	* remove several empty functions instead of converting them (Daniel)
	* comment uses of raw pointers with a TODO (Daniel)
	* TODO list: convert more helpers to use struct dma_buf_map

Signed-off-by: Thomas Zimmermann <[email protected]>
Acked-by: Christian König <[email protected]>
Tested-by: Sam Ravnborg <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
  • Loading branch information
Thomas Zimmermann committed Nov 9, 2020
1 parent 4367660 commit 49a3f51
Show file tree
Hide file tree
Showing 49 changed files with 351 additions and 308 deletions.
18 changes: 18 additions & 0 deletions Documentation/gpu/todo.rst
Original file line number Diff line number Diff line change
Expand Up @@ -450,6 +450,24 @@ Contact: Ville Syrjälä, Daniel Vetter

Level: Intermediate

Use struct dma_buf_map throughout codebase
------------------------------------------

Pointers to shared device memory are stored in struct dma_buf_map. Each
instance knows whether it refers to system or I/O memory. Most of the DRM-wide
interfaces have been converted to use struct dma_buf_map, but implementations
often still use raw pointers.

The task is to use struct dma_buf_map where it makes sense.

* Memory managers should use struct dma_buf_map for dma-buf-imported buffers.
* TTM might benefit from using struct dma_buf_map internally.
* Framebuffer copying and blitting helpers should operate on struct dma_buf_map.

Contact: Thomas Zimmermann <[email protected]>, Christian König, Daniel Vetter

Level: Intermediate


Core refactorings
=================
Expand Down
2 changes: 2 additions & 0 deletions drivers/gpu/drm/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
Expand All @@ -252,6 +253,7 @@ config DRM_AMDGPU
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
Expand Down
36 changes: 0 additions & 36 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,42 +41,6 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Maps the buffer object's backing pages into the kernel's virtual
 * address space via TTM and returns the resulting address.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int err;

	/* Kmap the whole BO; the mapping handle lives in the BO itself. */
	err = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->dma_buf_vmap);
	if (err)
		return ERR_PTR(err);

	return bo->dma_buf_vmap.virtual;
}

/**
* amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
* @obj: GEM BO
* @vaddr: Virtual address (unused)
*
* Tears down the in-kernel virtual mapping of the BO's memory.
*/
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

ttm_bo_kunmap(&bo->dma_buf_vmap);
}

/**
* amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
* @obj: GEM BO
Expand Down
2 changes: 0 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);

Expand Down
5 changes: 3 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
Expand Down Expand Up @@ -220,8 +221,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.open = amdgpu_gem_object_open,
.close = amdgpu_gem_object_close,
.export = amdgpu_gem_prime_export,
.vmap = amdgpu_gem_prime_vmap,
.vunmap = amdgpu_gem_prime_vunmap,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
};

/*
Expand Down
1 change: 0 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,6 @@ struct amdgpu_bo {
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;

struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;


Expand Down
27 changes: 13 additions & 14 deletions drivers/gpu/drm/ast/ast_cursor.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ static void ast_cursor_fini(struct ast_private *ast)

for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]);
drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
Expand All @@ -60,7 +60,7 @@ int ast_cursor_init(struct ast_private *ast)
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
void __iomem *vaddr;
struct dma_buf_map map;
int ret;

size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
Expand All @@ -77,16 +77,15 @@ int ast_cursor_init(struct ast_private *ast)
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
vaddr = drm_gem_vram_vmap(gbo);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
ret = drm_gem_vram_vmap(gbo, &map);
if (ret) {
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}

ast->cursor.gbo[i] = gbo;
ast->cursor.vaddr[i] = vaddr;
ast->cursor.map[i] = map;
}

return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
Expand All @@ -95,7 +94,7 @@ int ast_cursor_init(struct ast_private *ast)
while (i) {
--i;
gbo = ast->cursor.gbo[i];
drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]);
drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
Expand Down Expand Up @@ -170,6 +169,7 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo;
struct dma_buf_map map;
int ret;
void *src;
void __iomem *dst;
Expand All @@ -183,18 +183,17 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
ret = drm_gem_vram_pin(gbo, 0);
if (ret)
return ret;
src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
ret = PTR_ERR(src);
ret = drm_gem_vram_vmap(gbo, &map);
if (ret)
goto err_drm_gem_vram_unpin;
}
src = map.vaddr; /* TODO: Use mapping abstraction properly */

dst = ast->cursor.vaddr[ast->cursor.next_index];
dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;

/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);

drm_gem_vram_vunmap(gbo, src);
drm_gem_vram_vunmap(gbo, &map);
drm_gem_vram_unpin(gbo);

return 0;
Expand Down Expand Up @@ -257,7 +256,7 @@ void ast_cursor_show(struct ast_private *ast, int x, int y,
u8 __iomem *sig;
u8 jreg;

dst = ast->cursor.vaddr[ast->cursor.next_index];
dst = ast->cursor.map[ast->cursor.next_index].vaddr;

sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
Expand Down
7 changes: 4 additions & 3 deletions drivers/gpu/drm/ast/ast_drv.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,11 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dma-buf-map.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/types.h>

#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
Expand Down Expand Up @@ -131,7 +132,7 @@ struct ast_private {

struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
void __iomem *vaddr[AST_DEFAULT_HWC_NUM];
struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;

Expand Down
23 changes: 14 additions & 9 deletions drivers/gpu/drm/drm_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

Expand Down Expand Up @@ -1207,26 +1208,30 @@ void drm_gem_unpin(struct drm_gem_object *obj)

void *drm_gem_vmap(struct drm_gem_object *obj)
{
void *vaddr;
struct dma_buf_map map;
int ret;

if (obj->funcs->vmap)
vaddr = obj->funcs->vmap(obj);
else
vaddr = ERR_PTR(-EOPNOTSUPP);
if (!obj->funcs->vmap)
return ERR_PTR(-EOPNOTSUPP);

if (!vaddr)
vaddr = ERR_PTR(-ENOMEM);
ret = obj->funcs->vmap(obj, &map);
if (ret)
return ERR_PTR(ret);
else if (dma_buf_map_is_null(&map))
return ERR_PTR(-ENOMEM);

return vaddr;
return map.vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

if (!vaddr)
return;

if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, vaddr);
obj->funcs->vunmap(obj, &map);
}

/**
Expand Down
10 changes: 7 additions & 3 deletions drivers/gpu/drm/drm_gem_cma_helper.c
Original file line number Diff line number Diff line change
Expand Up @@ -519,6 +519,8 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
* address space
* @obj: GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
* This function maps a buffer exported via DRM PRIME into the kernel's
* virtual address space. Since the CMA buffers are already mapped into the
Expand All @@ -527,13 +529,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* driver's &drm_gem_object_funcs.vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
* 0 on success, or a negative error code otherwise.
*/
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

return cma_obj->vaddr;
dma_buf_map_set_vaddr(map, cma_obj->vaddr);

return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

Expand Down
Loading

0 comments on commit 49a3f51

Please sign in to comment.