dma-buf: add enum dma_resv_usage v4
This change adds the dma_resv_usage enum and allows us to specify why a
dma_resv object is queried for the fences it contains.

In addition to that, a dma_resv_usage_rw() helper function is added to help
retrieve the fences for a read or write userspace submission.

This is then deployed to the different query functions of the dma_resv
object and all of their users. Where the write parameter was previously
true we now use DMA_RESV_USAGE_READ, and DMA_RESV_USAGE_WRITE otherwise.
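
The enum and the dma_resv_usage_rw() helper themselves land in
include/linux/dma-resv.h, a hunk not shown in this excerpt. As a rough sketch
of what this patch introduces (only the two usages; the additional ones noted
under v2 below arrive in a separate patch):

enum dma_resv_usage {
	/*
	 * Fences added by an implicit write. Querying with
	 * DMA_RESV_USAGE_WRITE returns only these (the old exclusive
	 * fence).
	 */
	DMA_RESV_USAGE_WRITE,

	/*
	 * Fences added by an implicit read. Querying with
	 * DMA_RESV_USAGE_READ returns these plus all WRITE fences
	 * (the old "all fences" case).
	 */
	DMA_RESV_USAGE_READ,
};

/*
 * Map the "write" flag of a userspace submission to a usage: a new write
 * must wait for all existing readers and writers (READ), while a new read
 * only needs to wait for existing writers (WRITE).
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}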

v2: add KERNEL/OTHER in separate patch
v3: some kerneldoc suggestions by Daniel
v4: some more kerneldoc suggestions by Daniel, fix missing cases lost in
    the rebase pointed out by Bas.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Daniel Vetter <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Committed by ChristianKoenigAMD on Apr 7, 2022 (commit 7bc80a5, parent 6e87601)
Showing 46 changed files with 215 additions and 126 deletions.
6 changes: 4 additions & 2 deletions drivers/dma-buf/dma-buf.c
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
struct dma_fence *fence;
int r;

dma_resv_for_each_fence(&cursor, resv, write, fence) {
dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
fence) {
dma_fence_get(fence);
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
if (!r)
@@ -1124,7 +1125,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;

/* Wait on any implicit rendering fences */
ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;

35 changes: 17 additions & 18 deletions drivers/dma-buf/dma-resv.c
@@ -384,7 +384,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
cursor->seq = read_seqcount_begin(&cursor->obj->seq);
cursor->index = -1;
cursor->shared_count = 0;
if (cursor->all_fences) {
if (cursor->usage >= DMA_RESV_USAGE_READ) {
cursor->fences = dma_resv_shared_list(cursor->obj);
if (cursor->fences)
cursor->shared_count = cursor->fences->shared_count;
@@ -496,7 +496,7 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
dma_resv_assert_held(cursor->obj);

cursor->index = 0;
if (cursor->all_fences)
if (cursor->usage >= DMA_RESV_USAGE_READ)
cursor->fences = dma_resv_shared_list(cursor->obj);
else
cursor->fences = NULL;
@@ -551,7 +551,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
list = NULL;
excl = NULL;

dma_resv_iter_begin(&cursor, src, true);
dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
dma_resv_for_each_fence_unlocked(&cursor, f) {

if (dma_resv_iter_is_restarted(&cursor)) {
@@ -597,15 +597,15 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* dma_resv_get_fences - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
* @write: true if we should return all fences
* @usage: controls which fences to include, see enum dma_resv_usage.
* @num_fences: the number of fences returned
* @fences: the array of fence ptrs returned (array is krealloc'd to the
* required size, and must be freed by caller)
*
* Retrieve all fences from the reservation object.
* Returns either zero or -ENOMEM.
*/
int dma_resv_get_fences(struct dma_resv *obj, bool write,
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences)
{
struct dma_resv_iter cursor;
@@ -614,7 +614,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
*num_fences = 0;
*fences = NULL;

dma_resv_iter_begin(&cursor, obj, write);
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {

if (dma_resv_iter_is_restarted(&cursor)) {
@@ -646,7 +646,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
* dma_resv_get_singleton - Get a single fence for all the fences
* @obj: the reservation object
* @write: true if we should return all fences
* @usage: controls which fences to include, see enum dma_resv_usage.
* @fence: the resulting fence
*
* Get a single fence representing all the fences inside the resv object.
@@ -658,15 +658,15 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
*
* Returns 0 on success and negative error values on failure.
*/
int dma_resv_get_singleton(struct dma_resv *obj, bool write,
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
struct dma_fence **fence)
{
struct dma_fence_array *array;
struct dma_fence **fences;
unsigned count;
int r;

r = dma_resv_get_fences(obj, write, &count, &fences);
r = dma_resv_get_fences(obj, usage, &count, &fences);
if (r)
return r;

@@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
* dma_resv_wait_timeout - Wait on reservation's objects
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
* @usage: controls which fences to include, see enum dma_resv_usage.
* @intr: if true, do interruptible wait
* @timeout: timeout value in jiffies or zero to return immediately
*
@@ -710,14 +710,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than zer on success.
*/
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout)
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
bool intr, unsigned long timeout)
{
long ret = timeout ? timeout : 1;
struct dma_resv_iter cursor;
struct dma_fence *fence;

dma_resv_iter_begin(&cursor, obj, wait_all);
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {

ret = dma_fence_wait_timeout(fence, intr, ret);
@@ -737,8 +737,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
* fence
* @usage: controls which fences to include, see enum dma_resv_usage.
*
* Callers are not required to hold specific locks, but maybe hold
* dma_resv_lock() already.
@@ -747,12 +746,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
*
* True if all fences signaled, else false.
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;

dma_resv_iter_begin(&cursor, obj, test_all);
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
dma_resv_iter_end(&cursor);
return false;
@@ -775,7 +774,7 @@ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
struct dma_resv_iter cursor;
struct dma_fence *fence;

dma_resv_for_each_fence(&cursor, obj, true, fence) {
dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
seq_printf(seq, "\t%s fence:",
dma_resv_iter_is_exclusive(&cursor) ?
"Exclusive" : "Shared");
48 changes: 25 additions & 23 deletions drivers/dma-buf/st-dma-resv.c
@@ -58,7 +58,7 @@ static int sanitycheck(void *arg)
return r;
}

static int test_signaling(void *arg, bool shared)
static int test_signaling(void *arg, enum dma_resv_usage usage)
{
struct dma_resv resv;
struct dma_fence *f;
@@ -81,18 +81,18 @@ static int test_signaling(void *arg, bool shared)
goto err_unlock;
}

if (shared)
if (usage >= DMA_RESV_USAGE_READ)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);

if (dma_resv_test_signaled(&resv, shared)) {
if (dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
goto err_unlock;
}
dma_fence_signal(f);
if (!dma_resv_test_signaled(&resv, shared)) {
if (!dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv not reporting signaled\n");
r = -EINVAL;
goto err_unlock;
@@ -107,15 +107,15 @@ static int test_signaling(void *arg, bool shared)

static int test_excl_signaling(void *arg)
{
return test_signaling(arg, false);
return test_signaling(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_signaling(void *arg)
{
return test_signaling(arg, true);
return test_signaling(arg, DMA_RESV_USAGE_READ);
}

static int test_for_each(void *arg, bool shared)
static int test_for_each(void *arg, enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
@@ -139,13 +139,13 @@ static int test_for_each(void *arg, bool shared)
goto err_unlock;
}

if (shared)
if (usage >= DMA_RESV_USAGE_READ)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);

r = -ENOENT;
dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
if (!r) {
pr_err("More than one fence found\n");
r = -EINVAL;
@@ -156,7 +156,8 @@ static int test_for_each(void *arg, bool shared)
r = -EINVAL;
goto err_unlock;
}
if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
if (dma_resv_iter_is_exclusive(&cursor) !=
(usage >= DMA_RESV_USAGE_READ)) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_unlock;
@@ -178,15 +179,15 @@ static int test_for_each(void *arg, bool shared)

static int test_excl_for_each(void *arg)
{
return test_for_each(arg, false);
return test_for_each(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_for_each(void *arg)
{
return test_for_each(arg, true);
return test_for_each(arg, DMA_RESV_USAGE_READ);
}

static int test_for_each_unlocked(void *arg, bool shared)
static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
@@ -211,14 +212,14 @@ static int test_for_each_unlocked(void *arg, bool shared)
goto err_free;
}

if (shared)
if (usage >= DMA_RESV_USAGE_READ)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
dma_resv_unlock(&resv);

r = -ENOENT;
dma_resv_iter_begin(&cursor, &resv, shared);
dma_resv_iter_begin(&cursor, &resv, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!r) {
pr_err("More than one fence found\n");
@@ -234,7 +235,8 @@ static int test_for_each_unlocked(void *arg, bool shared)
r = -EINVAL;
goto err_iter_end;
}
if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
if (dma_resv_iter_is_exclusive(&cursor) !=
(usage >= DMA_RESV_USAGE_READ)) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_iter_end;
@@ -262,15 +264,15 @@ static int test_for_each_unlocked(void *arg, bool shared)

static int test_excl_for_each_unlocked(void *arg)
{
return test_for_each_unlocked(arg, false);
return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_for_each_unlocked(void *arg)
{
return test_for_each_unlocked(arg, true);
return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
}

static int test_get_fences(void *arg, bool shared)
static int test_get_fences(void *arg, enum dma_resv_usage usage)
{
struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
@@ -294,13 +296,13 @@ static int test_get_fences(void *arg, bool shared)
goto err_resv;
}

if (shared)
if (usage >= DMA_RESV_USAGE_READ)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
dma_resv_unlock(&resv);

r = dma_resv_get_fences(&resv, shared, &i, &fences);
r = dma_resv_get_fences(&resv, usage, &i, &fences);
if (r) {
pr_err("get_fences failed\n");
goto err_free;
@@ -324,12 +326,12 @@ static int test_get_fences(void *arg, bool shared)

static int test_excl_get_fences(void *arg)
{
return test_get_fences(arg, false);
return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_get_fences(void *arg)
{
return test_get_fences(arg, true);
return test_get_fences(arg, DMA_RESV_USAGE_READ);
}

int dma_resv(void)
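
The amdgpu hunks below, and the remaining changed files that are not shown,
apply the same mechanical conversion on the caller side: every bool
write/wait_all/test_all argument becomes an enum dma_resv_usage. A minimal,
hypothetical sketch (not part of this patch) of a caller using the converted
signatures:

#include <linux/dma-resv.h>

/* Wait for the implicit fences a new read or write submission depends on. */
static long example_wait_for_implicit_fences(struct dma_resv *resv, bool write)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(write);

	/* Cheap check first: true if all relevant fences have signaled. */
	if (dma_resv_test_signaled(resv, usage))
		return 0;

	/*
	 * Interruptible wait; returns remaining jiffies (> 0) on success,
	 * 0 on timeout or -ERESTARTSYS if interrupted.
	 */
	return dma_resv_wait_timeout(resv, usage, true, MAX_SCHEDULE_TIMEOUT);
}
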
4 changes: 3 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1288,7 +1288,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*
* TODO: Remove together with dma_resv rework.
*/
dma_resv_for_each_fence(&cursor, resv, false, fence) {
dma_resv_for_each_fence(&cursor, resv,
DMA_RESV_USAGE_WRITE,
fence) {
break;
}
dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
3 changes: 1 addition & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}

/* TODO: Unify this with other drivers */
r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
3 changes: 2 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
true, timeout);

/* ret == 0 means not signaled,
* ret > 0 means signaled
5 changes: 3 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
struct dma_fence *fence;
int r;

r = dma_resv_get_singleton(resv, true, &fence);
r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
if (r)
goto fallback;

@@ -139,7 +139,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}

4 changes: 2 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,

mmu_interval_set_seq(mni, cur_seq);

r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
false, MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
(remaining changed files not shown)
