
drm/i915/gvt: Move common vGPU workload creation into scheduler.c
Move common vGPU workload creation functions into scheduler.c since
they are not specific to execlist emulation.

Signed-off-by: Zhi Wang <[email protected]>
zhiwang1 authored and zhenyw committed Nov 16, 2017
1 parent d8235b5 commit 6d76303
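
The call site in the diff below implies the constructor contract this commit adds on the scheduler.c side. The following is a sketch inferred from the call intel_vgpu_create_workload(vgpu, ring_id, desc), not code quoted from the commit:

/* Inferred from the execlist.c call site; the definition lives in scheduler.c. */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc);

As the updated submit_context() below shows, callers test the result with IS_ERR()/PTR_ERR(), so allocation and setup failures come back as ERR_PTR-encoded codes rather than NULL.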
Showing 3 changed files with 183 additions and 172 deletions.
156 changes: 3 additions & 153 deletions drivers/gpu/drm/i915/gvt/execlist.c
@@ -438,179 +438,29 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
return ret;
}

#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{
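/* Each ring-context register is a (reg, val) pair, so consecutive .val
 * fields sit 8 bytes apart; walking up from pdp3_UDW reads the eight
 * 32-bit PDP halves highest-first, hence the reversed pdp[7 - i] store.
 */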
u64 gpa;
int i;

gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}

static int prepare_mm(struct intel_vgpu_workload *workload)
{
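/* Map the guest's addressing mode to a page-table depth, pull its PDPs
 * out of the shadowed ring context, and attach the matching shadow PPGTT
 * mm to the workload, reusing an existing shadow when one is found.
 */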
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];

if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}

read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {

mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}

#define get_last_workload(q) \
(list_empty(q) ? NULL : container_of(q->prev, \
struct intel_vgpu_workload, list))

static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
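/* Turn one guest ELSP context submission into a vGPU workload: read the
 * guest ring-context state, populate the workload, then scan, shadow and
 * queue it for the GVT scheduler.
 */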
struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;

ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}

intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);

intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);

head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;

if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
head = last_workload->rb_tail;
}

gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

- workload = intel_vgpu_create_workload(vgpu);
+ workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);

workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
workload->emulate_schedule_in = emulate_schedule_in;

if (ring_id == RCS) {
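/* Only the render ring carries the per-context and indirect-context
 * workaround batch-buffer pointers, so this state is RCS-only.
 */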
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}

if (emulate_schedule_in)
workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;

gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);

gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);

ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(s->workloads, workload);
return ret;
}

/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}

- if (ret == 0)
- queue_workload(workload);
- else {
- intel_vgpu_destroy_workload(workload);
- if (vgpu_is_vm_unhealthy(ret)) {
- intel_vgpu_clean_execlist(vgpu);
- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
- }
- }
- return ret;
+ queue_workload(workload);
+ return 0;
}
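
The scheduler.c half of this commit is not expanded in this view, so here is a sketch of how the deleted lines above presumably recombine into the new constructor. It is assembled only from the removed execlist.c code and the call site; alloc_workload() is an assumed helper name, not quoted from the commit.

/* Sketch only: assumed shape of intel_vgpu_create_workload() in
 * scheduler.c, reassembled from the execlist.c lines deleted above.
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;
	u64 ring_context_gpa;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR)
		return ERR_PTR(-EINVAL);

	workload = alloc_workload(vgpu);	/* assumed helper */
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	/* the rb_head/rb_tail/rb_start/rb_ctl reads, the same_context()
	 * head fixup and the RCS-only wa_ctx setup from the deleted code
	 * would be repeated here */

	ret = prepare_mm(workload);	/* moves along with this code */
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}
	return workload;
}

Note how the execlist-specific pieces (the prepare/complete callbacks, emulate_schedule_in and elsp_dwords) stay behind in submit_context(), which is exactly the split the commit message describes.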

int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
