drm/i915: Make i915_spin_request() static
No users now outside of i915_wait_request(), so we can make it private to
i915_gem_request.c, and assume the caller knows the seqno. In the
process, also remove i915_gem_request_started() as that was only ever
used by i915_spin_request().

Signed-off-by: Chris Wilson <[email protected]>
Cc: Michal Winiarski <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Reviewed-by: Joonas Lahtinen <[email protected]>
ickle committed Sep 22, 2017
1 parent 7fd0b1a commit b2f2f0f
Showing 2 changed files with 21 additions and 41 deletions.
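
The heart of the patch is folding the old i915_gem_request_started() test into the spinner: only busy-wait once the engine has already passed seqno - 1, i.e. once the request has actually started executing. The snippet below is a minimal userspace sketch, not part of the patch, of the wrap-safe seqno arithmetic that check relies on; seqno_passed() and request_started() are illustrative names that simply mirror i915_seqno_passed() and the removed __i915_gem_request_started().

/*
 * Illustrative only, not part of the patch: a userspace approximation of
 * the seqno ordering used by the spinner. Build with: cc -o seqno seqno.c
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors i915_seqno_passed(): true if seq1 is at or after seq2, using a
 * signed difference so the test survives wrap-around of the u32 counter.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

/* Mirrors the removed __i915_gem_request_started(): requests execute in
 * order on an engine, so once the engine has passed seqno - 1 this request
 * has started executing and is worth busy-waiting on.
 */
static bool request_started(uint32_t engine_seqno, uint32_t request_seqno)
{
        return seqno_passed(engine_seqno, request_seqno - 1);
}

int main(void)
{
        /* Engine has completed seqno 99: request 100 has started, 102 has not. */
        assert(request_started(99, 100));
        assert(!request_started(99, 102));

        /* The ordering test still holds when the counter wraps past zero. */
        assert(seqno_passed(3, 0xfffffffeu));
        assert(!seqno_passed(0xfffffffeu, 3));

        printf("seqno ordering checks passed\n");
        return 0;
}
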
drivers/gpu/drm/i915/i915_gem_request.c: 21 additions & 6 deletions
@@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
         return this_cpu != cpu;
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
-                         u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                                u32 seqno, int state, unsigned long timeout_us)
 {
         struct intel_engine_cs *engine = req->engine;
         unsigned int irq, cpu;
 
+        GEM_BUG_ON(!seqno);
+
+        /*
+         * Only wait for the request if we know it is likely to complete.
+         *
+         * We don't track the timestamps around requests, nor the average
+         * request length, so we do not have a good indicator that this
+         * request will complete within the timeout. What we do know is the
+         * order in which requests are executed by the engine and so we can
+         * tell if the request has started. If the request hasn't started yet,
+         * it is a fair assumption that it will not complete within our
+         * relatively short timeout.
+         */
+        if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+                return false;
+
         /* When waiting for high frequency requests, e.g. during synchronous
          * rendering split between the CPU and GPU, the finite amount of time
          * required to set up the irq and wait upon it limits the response
@@ -1040,8 +1056,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
         irq = atomic_read(&engine->irq_count);
         timeout_us += local_clock_us(&cpu);
         do {
-                if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                      seqno))
+                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                         return seqno == i915_gem_request_global_seqno(req);
 
                 /* Seqno are meant to be ordered *before* the interrupt. If
@@ -1153,7 +1168,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
         GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
 
         /* Optimistic short spin before touching IRQs */
-        if (i915_spin_request(req, state, 5))
+        if (__i915_spin_request(req, wait.seqno, state, 5))
                 goto complete;
 
         set_current_state(state);
@@ -1210,7 +1225,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
                         continue;
 
                 /* Only spin if we know the GPU is processing this request */
-                if (i915_spin_request(req, state, 2))
+                if (__i915_spin_request(req, wait.seqno, state, 2))
                         break;
 
                 if (!intel_wait_check_request(&wait, req)) {
drivers/gpu/drm/i915/i915_gem_request.h: 0 additions & 35 deletions
@@ -312,26 +312,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
         return (s32)(seq1 - seq2) >= 0;
 }
 
-static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
-{
-        GEM_BUG_ON(!seqno);
-        return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                 seqno - 1);
-}
-
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-        u32 seqno;
-
-        seqno = i915_gem_request_global_seqno(req);
-        if (!seqno)
-                return false;
-
-        return __i915_gem_request_started(req, seqno);
-}
-
 static inline bool
 __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
 {
@@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
         return __i915_gem_request_completed(req, seqno);
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-                         u32 seqno, int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-                                     int state, unsigned long timeout_us)
-{
-        u32 seqno;
-
-        seqno = i915_gem_request_global_seqno(request);
-        if (!seqno)
-                return 0;
-
-        return (__i915_gem_request_started(request, seqno) &&
-                __i915_spin_request(request, seqno, state, timeout_us));
-}
-
 /* We treat requests as fences. This is not be to confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
