Merge tag 'drm-fixes-2021-08-20-3' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regularly scheduled fixes. The ttm one solves a problem of GPU drivers
  failing to load if debugfs is off in Kconfig, otherwise the i915 and
  mediatek, and amdgpu fixes all fairly normal.

  Nouveau has a couple of display fixes, plus a fix for a longstanding
  race condition in its memory manager code; that fix mostly removes
  code that wasn't working properly and has no userspace users. It
  makes the diffstat kinda larger, but in a good (negative line-count)
  way.

  core:
   - fix drm_wait_vblank uapi copying bug

  ttm:
   - fix debugfs init when debugfs is off

  amdgpu:
   - vega10 SMU workload fix
   - DCN VM fix
   - DCN 3.01 watermark fix

  amdkfd:
   - SVM fix

  nouveau:
   - ampere display fixes
   - remove MM misfeature to fix a longstanding race condition

  i915:
   - tweaked display workaround for all PCHs
   - eDP MSO pipe sanity for ADL-P fix
   - remove unused symbol export

  mediatek:
   - AAL output size setting
   - Delete component in remove function"

* tag 'drm-fixes-2021-08-20-3' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Use DCN30 watermark calc for DCN301
  drm/i915/dp: remove superfluous EXPORT_SYMBOL()
  drm/i915/edp: fix eDP MSO pipe sanity checks for ADL-P
  drm/i915: Tweaked Wa_14010685332 for all PCHs
  drm/nouveau: rip out nvkm_client.super
  drm/nouveau: block a bunch of classes from userspace
  drm/nouveau/fifo/nv50-: rip out dma channels
  drm/nouveau/kms/nv50: workaround EFI GOP window channel format differences
  drm/nouveau/disp: power down unused DP links during init
  drm/nouveau: recognise GA107
  drm: Copy drm_wait_vblank to user before returning
  drm/amd/display: Ensure DCN save after VM setup
  drm/amdkfd: fix random KFDSVMRangeTest.SetGetAttributesTest test failure
  drm/amd/pm: change the workload type for some cards
  Revert "drm/amd/pm: fix workload mismatch on vega10"
  drm: ttm: Don't bail from ttm_global_init if debugfs_create_dir fails
  drm/mediatek: Add component_del in OVL and COLOR remove function
  drm/mediatek: Add AAL output size configuration
torvalds committed Aug 20, 2021
2 parents 3db903a + daa7772 commit 8ba9fbe
Showing 61 changed files with 224 additions and 477 deletions.
8 changes: 8 additions & 0 deletions drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);

/* Flush pending deferred work to avoid racing with deferred actions from
* previous memory map changes (e.g. munmap). Concurrent memory map changes
* can still race with get_attr because we don't hold the mmap lock. But that
* would be a race condition in the application anyway, and undefined
* behaviour is acceptable in that case.
*/
flush_work(&p->svms.deferred_list_work);

mmap_read_lock(mm);
if (!svm_range_is_valid(mm, start, size)) {
pr_debug("invalid range\n");
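
For context, the pattern this hunk adds is "drain pending deferred work, then take the lock and read": any updates queued by earlier map/unmap events are applied before the attributes are sampled. Below is a minimal single-threaded C sketch of that idea; all structures and names are illustrative stand-ins, not the amdkfd SVM code.

/* Minimal sketch of "flush deferred work before reading shared state".
 * Illustrative only: names and structures are hypothetical, not the
 * amdkfd SVM implementation. */
#include <stdio.h>

#define MAX_PENDING 16

struct range_attrs {
	unsigned long start, size;
	int preferred_loc;
};

/* Deferred updates queued by earlier "map/unmap" events. */
static struct range_attrs pending[MAX_PENDING];
static int npending;

static struct range_attrs current_attrs = { 0x1000, 0x2000, 0 };

static void queue_deferred_update(struct range_attrs a)
{
	if (npending < MAX_PENDING)
		pending[npending++] = a;	/* applied later, not immediately */
}

static void flush_deferred_work(void)
{
	/* Apply everything queued so far, mirroring the flush_work() call
	 * on the deferred_list_work item in the real driver. */
	for (int i = 0; i < npending; i++)
		current_attrs = pending[i];
	npending = 0;
}

static struct range_attrs get_attrs(void)
{
	/* Drain deferred updates first so the snapshot below is not stale. */
	flush_deferred_work();
	return current_attrs;
}

int main(void)
{
	queue_deferred_update((struct range_attrs){ 0x1000, 0x4000, 1 });

	struct range_attrs a = get_attrs();
	printf("start=%#lx size=%#lx loc=%d\n", a.start, a.size, a.preferred_loc);
	return 0;
}
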
6 changes: 6 additions & 0 deletions drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
if (dc->hwss.z10_save_init)
dc->hwss.z10_save_init(dc);
}
#endif
/*
* Applies given context to HW and copy it into current context.
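
dc_z10_save_init() follows the same optional hardware-sequencer hook convention as dc_z10_restore() above it: the hook is a function pointer that ASICs without the feature leave NULL, and the wrapper only calls it when set. A minimal standalone sketch of that guarded-hook pattern follows; the types and names are illustrative, not the real dc/hwss structures.

/* Sketch of an optional function-pointer hook, as used for z10_restore /
 * z10_save_init above. Names are illustrative, not the real DC types. */
#include <stdio.h>

struct hw_hooks {
	void (*save_init)(void *hw);	/* NULL when the ASIC has no such feature */
};

struct device_ctx {
	struct hw_hooks hooks;
	void *hw;
};

static void dcn31_like_save_init(void *hw)
{
	printf("saving init state for %p\n", hw);
}

/* Wrapper mirrors dc_z10_save_init(): call through only if implemented. */
static void device_save_init(struct device_ctx *d)
{
	if (d->hooks.save_init)
		d->hooks.save_init(d->hw);
}

int main(void)
{
	struct device_ctx with_hook = { .hooks = { dcn31_like_save_init }, .hw = &with_hook };
	struct device_ctx without_hook = { 0 };

	device_save_init(&with_hook);		/* runs the hook */
	device_save_init(&without_hook);	/* silently a no-op */
	return 0;
}
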
3 changes: 3 additions & 0 deletions drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_save_init(dc);
#endif
}

return num_vmids;
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/display/dc/dc.h
@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
void dc_z10_save_init(struct dc *dc);
#endif

bool dc_enable_dmub_notifications(struct dc *dc);
96 changes: 1 addition & 95 deletions drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}

static void calculate_wm_set_for_vlevel(
int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;

ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;

dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;

wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;

}

static void dcn301_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel_req)
{
int i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;

ASSERT(bw_params);

vlevel_max = bw_params->clk_table.num_entries - 1;

/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = min(max(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = min(max(vlevel_req, 1), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);

/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = min(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);

for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;

pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);

if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;

pipe_idx++;
}

dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}

static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
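
The functional change in this file is the single function-pointer swap at the end of the hunk: the resource ops table now points calculate_wm_and_dlg at the shared DCN30 routine instead of the removed DCN3.01-specific one. A tiny sketch of that per-ASIC ops-table idiom, with illustrative names only:

/* Sketch of swapping one callback in a per-ASIC ops table to a shared
 * implementation, as dcn301_res_pool_funcs does for calculate_wm_and_dlg.
 * All names are illustrative. */
#include <stdio.h>

struct res_funcs {
	void (*calculate_wm)(int vlevel);
	void (*validate_bw)(void);
};

static void shared_calculate_wm(int vlevel)	/* the "dcn30" version */
{
	printf("shared watermark calc, vlevel=%d\n", vlevel);
}

static void shared_validate_bw(void)
{
	printf("shared bandwidth validation\n");
}

/* Per-ASIC table: the watermark hook now reuses the shared routine. */
static const struct res_funcs asic301_funcs = {
	.calculate_wm = shared_calculate_wm,
	.validate_bw = shared_validate_bw,
};

int main(void)
{
	asic301_funcs.calculate_wm(2);
	asic301_funcs.validate_bw();
	return 0;
}
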
12 changes: 12 additions & 0 deletions drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
&pipe_ctx->stream_res.encoder_info_frame);
}
}
void dcn31_z10_save_init(struct dc *dc)
{
union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;

dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}

void dcn31_z10_restore(struct dc *dc)
{
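
dcn31_z10_save_init() uses the usual firmware-command flow: zero the command, fill in type and sub-type, queue it, kick execution, and wait for idle. The sketch below imitates that queue/execute/wait-idle sequence against a local stub "firmware"; none of the names match the real DMUB service API.

/* Sketch of a queue -> execute -> wait-idle command flow, as in
 * dcn31_z10_save_init(). The "firmware" here is a local stub and the
 * names are illustrative. */
#include <stdio.h>
#include <string.h>

enum cmd_type { CMD_IDLE_OPT = 1 };
enum cmd_subtype { CMD_IDLE_OPT_SAVE_INIT = 1 };

struct fw_cmd {
	unsigned char type;
	unsigned char sub_type;
};

struct fw_srv {
	struct fw_cmd ring[8];
	int head, tail;
	int busy;
};

static void fw_cmd_queue(struct fw_srv *srv, const struct fw_cmd *cmd)
{
	srv->ring[srv->head++ % 8] = *cmd;
}

static void fw_cmd_execute(struct fw_srv *srv)
{
	srv->busy = 1;	/* tell the stub "firmware" to start processing */
}

static void fw_wait_idle(struct fw_srv *srv)
{
	/* Stand-in for polling the firmware: drain the ring, then go idle. */
	while (srv->tail < srv->head) {
		struct fw_cmd *c = &srv->ring[srv->tail % 8];

		printf("fw handled cmd type=%d sub=%d\n", c->type, c->sub_type);
		srv->tail++;
	}
	srv->busy = 0;
}

int main(void)
{
	struct fw_srv srv = { 0 };
	struct fw_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));		/* same zero-then-fill pattern */
	cmd.type = CMD_IDLE_OPT;
	cmd.sub_type = CMD_IDLE_OPT_SAVE_INIT;

	fw_cmd_queue(&srv, &cmd);
	fw_cmd_execute(&srv);
	fw_wait_idle(&srv);
	return 0;
}
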
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);

void dcn31_z10_restore(struct dc *dc);
void dcn31_z10_save_init(struct dc *dc);

void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
int width, int height, int offset);

void (*z10_restore)(struct dc *dc);
void (*z10_save_init)(struct dc *dc);

void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
5 changes: 5 additions & 0 deletions drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware restore.
*/
DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,

/**
* DCN hardware save.
*/
DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
};

/**
15 changes: 14 additions & 1 deletion drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}

static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;

return (adev->pdev->device == 0x6860);
}

static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
}

out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
if (vega10_get_power_profile_mode_quirks(hwmgr))
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << power_profile_mode,
NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);

hwmgr->power_profile_mode = power_profile_mode;

return 0;
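
The quirk keeps the old encoding (bit N for profile N) only for device 0x6860 and switches every other card to the encoding the SMU expects, where profile 0 yields an empty mask and profile N sets bit N-1. A small self-contained sketch of the two encodings; the helper names and the non-quirked device ID are purely illustrative.

/* Sketch of the two workload-mask encodings used above. The device-ID
 * check mirrors the quirk; everything else is illustrative. */
#include <stdio.h>
#include <stdint.h>

static int needs_legacy_workload_mask(uint16_t pci_device_id)
{
	return pci_device_id == 0x6860;	/* quirked card keeps old behaviour */
}

static uint32_t workload_mask(uint16_t pci_device_id, uint32_t profile_mode)
{
	if (needs_legacy_workload_mask(pci_device_id))
		return 1u << profile_mode;		/* bit N for profile N */

	/* Default: profile 0 -> empty mask, profile N -> bit N-1. */
	return profile_mode ? 1u << (profile_mode - 1) : 0;
}

int main(void)
{
	for (uint32_t mode = 0; mode < 4; mode++)
		printf("mode %u: quirked=0x%x default=0x%x\n", mode,
		       workload_mask(0x6860, mode), workload_mask(0x687f, mode));
	return 0;
}
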
4 changes: 1 addition & 3 deletions drivers/gpu/drm/drm_ioc32.c
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;

req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;

return 0;
return err;
}

#if defined(CONFIG_X86)
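
The fix restores the expectation that the reply is copied back to userspace even when the ioctl failed, with the error only returned afterwards instead of bailing out early. A minimal userspace sketch of that ordering; the structures and the inner "ioctl" are stand-ins, not the real compat code.

/* Sketch of "copy the reply out even on error, then return the error",
 * the ordering compat_drm_wait_vblank() now follows. All names are
 * illustrative stand-ins for the real request/reply structures. */
#include <stdio.h>
#include <string.h>

struct reply32 {
	int type;
	unsigned int sequence;
};

/* Pretend inner ioctl: always fills the reply, may still return an error. */
static int do_wait(struct reply32 *reply)
{
	reply->type = 1;
	reply->sequence = 42;
	return -1;	/* e.g. interrupted */
}

static int compat_wait(void *user_buf)
{
	struct reply32 reply = { 0 };
	int err = do_wait(&reply);

	/* Copy the (possibly partial) reply back regardless of err ... */
	memcpy(user_buf, &reply, sizeof(reply));	/* copy_to_user() stand-in */

	/* ... and only then propagate the inner error. */
	return err;
}

int main(void)
{
	struct reply32 out;
	int err = compat_wait(&out);

	printf("err=%d type=%d seq=%u\n", err, out.type, out.sequence);
	return 0;
}
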
24 changes: 12 additions & 12 deletions drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}

/* Splitter enable for eDP MSO is limited to certain pipes. */
static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
{
if (IS_ALDERLAKE_P(i915))
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
}

static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
if (!pipe_config->splitter.enable)
return;

/* Splitter enable is supported for pipe A only. */
if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
return;

if (crtc_state->splitter.enable) {
/* Splitter enable is supported for pipe A only. */
if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
return;

dss1 |= SPLITTER_ENABLE;
dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)

dig_port->hpd_pulse = intel_dp_hpd_pulse;

/* Splitter enable for eDP MSO is limited to certain pipes. */
if (dig_port->dp.mso_link_count) {
encoder->pipe_mask = BIT(PIPE_A);
if (IS_ALDERLAKE_P(dev_priv))
encoder->pipe_mask |= BIT(PIPE_B);
}
if (dig_port->dp.mso_link_count)
encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
}

/* In theory we don't need the encoder->type check, but leave it just in
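
The refactor replaces two open-coded "pipe A only" assumptions with one helper returning a bitmask of pipes allowed to drive the eDP MSO splitter (pipes A and B on Alder Lake-P, pipe A elsewhere), shared by the config sanity check and encoder init. A standalone sketch of the bitmask helper; the platform flag and names are illustrative.

/* Sketch of a "which pipes may use the splitter" bitmask helper, mirroring
 * intel_ddi_splitter_pipe_mask(). Platform detection is faked; names are
 * illustrative. */
#include <stdio.h>
#include <stdbool.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };
#define BIT(n) (1u << (n))

static unsigned int splitter_pipe_mask(bool is_adlp_like)
{
	if (is_adlp_like)
		return BIT(PIPE_A) | BIT(PIPE_B);
	return BIT(PIPE_A);
}

static bool splitter_allowed_on_pipe(bool is_adlp_like, enum pipe pipe)
{
	/* Same check the sanity path uses: is this pipe's bit in the mask? */
	return splitter_pipe_mask(is_adlp_like) & BIT(pipe);
}

int main(void)
{
	printf("ADL-P-like, pipe B allowed: %d\n",
	       splitter_allowed_on_pipe(true, PIPE_B));	/* 1 */
	printf("older platform, pipe B allowed: %d\n",
	       splitter_allowed_on_pipe(false, PIPE_B));	/* 0 */
	return 0;
}
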
16 changes: 8 additions & 8 deletions drivers/gpu/drm/i915/display/intel_display_power.c
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
intel_de_rmw(i915, SOUTH_CHICKEN1,
SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}

/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}

/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
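
Moving the workaround out of the DC9 branch means the SBCLK_RUN_REFCLK_DIS read-modify-write now runs for every PCH type in the [CNP, DG1) range, set on suspend and cleared on resume. A small sketch of that range-gated read-modify-write; the register storage and enum values are stand-ins.

/* Sketch of the range-gated read-modify-write applied above: set the
 * workaround bit on suspend, clear it on resume, but only for PCH types
 * in [CNP, DG1). Register storage and enum values are illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum pch_type { PCH_NONE, PCH_CNP, PCH_ICP, PCH_MCC, PCH_TGP, PCH_ADP, PCH_DG1 };
#define SBCLK_RUN_REFCLK_DIS (1u << 7)

static uint32_t south_chicken1;	/* stand-in for the MMIO register */

static void de_rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	*reg = (*reg & ~clear) | set;
}

static void apply_refclk_dis_wa(enum pch_type pch, bool suspend)
{
	if (pch >= PCH_CNP && pch < PCH_DG1)
		de_rmw(&south_chicken1, SBCLK_RUN_REFCLK_DIS,
		       suspend ? SBCLK_RUN_REFCLK_DIS : 0);
}

int main(void)
{
	apply_refclk_dis_wa(PCH_TGP, true);
	printf("after suspend: %#x\n", south_chicken1);	/* bit set */
	apply_refclk_dis_wa(PCH_TGP, false);
	printf("after resume:  %#x\n", south_chicken1);	/* bit cleared */
	return 0;
}
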
1 change: 0 additions & 1 deletion drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)

return lttpr_count;
}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);

static u8 dp_voltage_max(u8 preemph)
{
21 changes: 0 additions & 21 deletions drivers/gpu/drm/i915/i915_irq.c
@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}

static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;

/*
* Wa_14010685332:cnp/cmp,tgp,adp
* TODO: Clarify which platforms this applies to
* TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
* on earlier platforms and whether the workaround is also needed for runtime suspend/resume
*/
if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
(INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
SBCLK_RUN_REFCLK_DIS);
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
}

static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);

cnp_display_clock_wa(dev_priv);
}

static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)

if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);

cnp_display_clock_wa(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
