author      Dave Airlie <airlied@redhat.com>    2025-11-07 09:45:38 +1000
committer   Dave Airlie <airlied@redhat.com>    2025-11-07 09:47:56 +1000
commit      8f037e11d070a38d143b85d72d369190514dca99 (patch)
tree        cb56c88646b0c032fbcf0d8c175f8f1006b4e85e
parent      f67d54e96bc9e4e20a927868f02c2e9d1aa09751 (diff)
parent      c4227e1609b355cde373b5770bd91a7eb49eb9cb (diff)
Merge tag 'drm-intel-next-2025-11-04' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next
drm/i915 feature pull for v6.19:

Features and functionality:
- Enable LNL+ content adaptive sharpness filter (CASF) (Nemesa)
- Use optimized VRR guardband (Ankit, Ville)
- Enable Xe3p LT PHY (Suraj)
- Enable FBC support for Xe3p_LPD display (Sai Teja, Vinod)
- Specify DMC firmware for display version 30.02 (Dnyaneshwar)
- Report reason for disabling PSR to debugfs (Michał)
- Extend i915_display_info with Type-C port details (Khaled)
- Log DSI send packet sequence errors and contents

Refactoring and cleanups:
- Refactoring to prepare for VRR guardband optimization (Ankit)
- Abstract VRR live status wait (Ankit)
- Refactor VRR and DSB timing to handle Set Context Latency explicitly (Ankit)
- Helpers for prefill latency calculations (Ville)
- Refactor SKL+ watermark latency setup (Ville)
- VRR refactoring and cleanups (Ville)
- SKL+ universal plane cleanups (Ville)
- Decouple CDCLK from state->modeset refactor (Ville)
- Refactor VLV/CHV clock functions (Jani)
- Refactor fbdev handling (Jani)
- Call i915 and xe runtime PM from display via function pointers (Jouni)
- IRQ code refactoring (Jani)
- Drop display dependency on i915 feature check macros (Jani)
- Refactor and unify i915 and xe stolen memory interfaces towards display (Jani)
- Switch to driver agnostic drm to display pointer chase (Jani)
- Use display version over graphics version in display code (Matt A)
- GVT cleanups (Jonathan, Andi)
- Rename a VLV clock function to unify (Michał)
- Explicitly sanitize DMC package header num entries (Luca)
- Remove redundant port clock check from ALPM (Jouni)
- Use sysfs_emit() instead of sprintf() in PMU sysfs (Madhur Kumar)
- Clean up C20 PHY PLL register macros (Imre, Mika)
- Abstract "address in MMIO table" helper for general use (Matt A)
- Improve VRR platform abstractions (Ville)
- Move towards more standard PCI PM code usage (Ville)
- Framebuffer refactoring (Ville)
- Drop display dependency on i915_utils.h (Jani)
- Include cleanups (Jani)

Fixes:
- Workaround docking station DSC issues with high pixel clock and bpp (Imre)
- Fix Panel Replay in DSC mode (Imre)
- Disable tracepoints for PREEMPT_RT as a workaround (Maarten)
- Fix intel_crtc_get_vblank_counter() on PREEMPT_RT (Maarten)
- Fix C10 PHY identification on PTL/WCL (Dnyaneshwar)
- Take AS SDP into account with optimized guardband (Jouni)
- Fix panic structure allocation memory leak (Jani)
- Adjust an FBC workaround platforms (Vinod)
- Add fallback for CDCLK selection (Naladala)
- Avoid using invalid transcoder in MST transport select (Suraj)
- Don't use cursor size reduction on display version 14+ (Nemesa)
- Fix C20 PHY PLL register programming (Imre, Mika)
- Fix PSR frontbuffer flush handling (Jouni)
- Store ALPM parameters in crtc state (Jouni)
- Defeature DRRS on LNL+ (Ville)
- Fix the scope of the large DRAM DIMM workaround (Ville)
- Fix PICA vs. AUX power ordering issue (Gustavo)
- Fix pixel rate for computing watermark line time (Ville)
- Fix framebuffer set_tiling vs. addfb race (Ville)
- DMC event handler fixes (Ville)

DRM Core:
- CRTC sharpness strength property (Nemesa)
- DPCD DSC quirk for Synaptics Panamera devices (Imre)
- Helpers to query the branch DSC max throughput/line-width (Imre)

Merges:
- Backmerge drm-next for v6.18-rc and to sync with drm-xe-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patch.msgid.link/ec5a05f2df6d597a62033ee2d57225cce707b320@intel.com
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 156
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 12
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_alpm.c | 91
-rw-r--r--  drivers/gpu/drm/i915/display/intel_alpm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 315
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_casf.c | 290
-rw-r--r--  drivers/gpu/drm/i915/display/intel_casf.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_casf_regs.h | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 437
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 98
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 131
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dbuf_bw.c | 295
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dbuf_bw.h | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 319
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 128
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_jiffies.h | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rpm.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 68
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_utils.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_utils.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 108
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 263
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 198
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_flipq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lt_phy.c | 2000
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lt_phy.h | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lt_phy_regs.h | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_verify.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pfit.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane.c | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 428
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 498
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/skl_prefill.c | 157
-rw-r--r--  drivers/gpu/drm/i915/display/skl_prefill.h | 46
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 234
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 168
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane_regs.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 341
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_clock.c | 88
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_clock.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 104
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 34
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 35
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_config.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 107
-rw-r--r--  drivers/gpu/drm/i915/i915_jiffies.h | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_mmio_range.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_mmio_range.h | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 4
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c | 10
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 8
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h | 105
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 4
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h | 13
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h | 9
-rw-r--r--  drivers/gpu/drm/xe/display/ext/i915_utils.c | 27
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.c | 3
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 66
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 12
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_rpm.c | 61
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_rpm.h | 11
-rw-r--r--  drivers/gpu/drm/xe/display/xe_stolen.c | 123
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 1
-rw-r--r--  include/drm/display/drm_dp.h | 3
-rw-r--r--  include/drm/display/drm_dp_helper.h | 14
-rw-r--r--  include/drm/drm_crtc.h | 18
-rw-r--r--  include/drm/intel/display_member.h | 42
-rw-r--r--  include/drm/intel/display_parent_interface.h | 45
-rw-r--r--  include/drm/intel/pciids.h | 5
182 files changed, 7573 insertions, 2274 deletions
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 49803528023b..f9fdf19de74a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2552,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
+ /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
};
#undef OUI
@@ -2841,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+/*
+ * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and
+ * Appendix L.1 Derivation of Slice Count Requirements.
+ */
+static int dsc_sink_min_slice_throughput(int peak_pixel_rate)
+{
+ if (peak_pixel_rate >= 4800000)
+ return 600000;
+ else if (peak_pixel_rate >= 2700000)
+ return 400000;
+ else
+ return 340000;
+}
+
+/**
+ * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice
+ * @dsc_dpcd: DSC sink's capabilities from DPCD
+ * @peak_pixel_rate: Cumulative peak pixel rate in kHz
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the DSC sink device's maximum pixel throughput per slice, based on
+ * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred
+ * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Note that @peak_pixel_rate is the total pixel rate transferred to the same
+ * DSC/display sink. For instance to calculate a tile's slice count of an MST
+ * multi-tiled display sink (not considering here the required
+ * rounding/alignment of slice count)::
+ *
+ * @peak_pixel_rate = tile_pixel_rate * tile_count
+ * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate)
+ * tile_slice_count = total_slice_count / tile_count
+ *
+ * Returns:
+ * The maximum pixel throughput per slice supported by the DSC sink device
+ * in kPixels/sec.
+ */
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444)
+{
+ int throughput;
+ int delta = 0;
+ int base;
+
+ throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+
+ if (is_rgb_yuv444) {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_SHIFT;
+
+ delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) &
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */
+ delta *= 2000;
+ } else {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_1_SHIFT;
+ }
+
+ switch (throughput) {
+ case 0:
+ return dsc_sink_min_slice_throughput(peak_pixel_rate);
+ case 1:
+ base = 340000;
+ break;
+ case 2 ... 14:
+ base = 400000 + 50000 * (throughput - 2);
+ break;
+ case 15:
+ base = 170000;
+ break;
+ }
+
+ return base + delta;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput);
+
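
For illustration, a minimal sketch (not part of this patch) of how a caller could apply the MST tile formula from the kernel-doc above; example_tile_slice_count() and its parameters are hypothetical, and the required slice count rounding/alignment is ignored:

/* Hypothetical helper: derive a per-tile slice count for an MST multi-tiled sink. */
static int example_tile_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
				    int tile_pixel_rate, int tile_count,
				    bool is_rgb_yuv444)
{
	/* Cumulative peak pixel rate of all tiles, in kHz */
	int peak_pixel_rate = tile_pixel_rate * tile_count;
	int max_slice_throughput =
		drm_dp_dsc_sink_max_slice_throughput(dsc_dpcd, peak_pixel_rate,
						     is_rgb_yuv444);
	int total_slice_count = DIV_ROUND_UP(peak_pixel_rate, max_slice_throughput);

	return DIV_ROUND_UP(total_slice_count, tile_count);
}
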
+static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg)
+{
+ return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+}
+
+/**
+ * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the branch device's maximum overall DSC pixel throughput, based on
+ * the device's DPCD DSC branch capabilities, and whether the output
+ * format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Returns:
+ * - 0: The maximum overall throughput capability is not indicated by
+ * the device separately and it must be determined from the per-slice
+ * max throughput (see @drm_dp_dsc_branch_slice_max_throughput())
+ * and the maximum slice count supported by the device.
+ * - > 0: The maximum overall DSC pixel throughput supported by the branch
+ * device in kPixels/sec.
+ */
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444)
+{
+ int throughput;
+
+ if (is_rgb_yuv444)
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0);
+ else
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_1);
+
+ switch (throughput) {
+ case 0:
+ return 0;
+ case 1:
+ return 680000;
+ default:
+ return 600000 + 50000 * throughput;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput);
+
+/**
+ * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ *
+ * Return the branch device's maximum overall DSC line width, based on
+ * the device's @dsc_branch_dpcd capabilities.
+ *
+ * Returns:
+ * - 0: The maximum line width is not indicated by the device
+ * separately and it must be determined from the maximum
+ * slice count and slice-width supported by the device.
+ * - %-EINVAL: The device indicates an invalid maximum line width
+ * (< 5120 pixels).
+ * - >= 5120: The maximum line width in pixels.
+ */
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE])
+{
+ int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH);
+
+ switch (line_width) {
+ case 0:
+ return 0;
+ case 1 ... 15:
+ return -EINVAL;
+ default:
+ return line_width * 320;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width);
+
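
As a minimal sketch (not part of this patch), the two branch helpers above might be combined to validate a mode against a branch device's DSC limits; example_branch_dsc_mode_ok() and the fallback_* parameters are hypothetical, standing in for limits the caller would derive from the branch device's slice count/width caps when no separate maximum is reported:

static bool example_branch_dsc_mode_ok(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
				       bool is_rgb_yuv444,
				       int pixel_rate, int hdisplay,
				       int fallback_throughput, int fallback_line_width)
{
	int max_throughput =
		drm_dp_dsc_branch_max_overall_throughput(dsc_branch_dpcd, is_rgb_yuv444);
	int max_line_width = drm_dp_dsc_branch_max_line_width(dsc_branch_dpcd);

	/* The device reported an invalid maximum line width (< 5120 pixels) */
	if (max_line_width < 0)
		return false;

	/* 0 means no separate cap is reported; fall back to derived limits */
	if (max_throughput == 0)
		max_throughput = fallback_throughput;
	if (max_line_width == 0)
		max_line_width = fallback_line_width;

	return pixel_rate <= max_throughput && hdisplay <= max_line_width;
}
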
static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address,
u8 *buf, int buf_size)
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 85dbdaa4a2e2..b2cb5ae5a139 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -419,6 +419,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (property == crtc->scaling_filter_property) {
state->scaling_filter = val;
+ } else if (property == crtc->sharpness_strength_property) {
+ state->sharpness_strength = val;
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
@@ -456,6 +458,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = 0;
else if (property == crtc->scaling_filter_property)
*val = state->scaling_filter;
+ else if (property == crtc->sharpness_strength_property)
+ *val = state->sharpness_strength;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 46655339003d..a7797d260f1e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
+ * SHARPNESS_STRENGTH:
+ * Atomic property for setting the sharpness strength/intensity by userspace.
+ *
+ * The value of this property is set as an integer value ranging
+ * from 0 - 255 where:
+ *
+ * 0: Sharpness feature is disabled (default value).
+ *
+ * 1: Minimum sharpness.
+ *
+ * 255: Maximum sharpness.
+ *
+ * The user can gradually increase or decrease the sharpness level and
+ * set the optimum value depending on the content.
+ * This value is passed to the kernel through the UAPI.
+ * Setting this property does not require a modeset.
+ * The sharpness effect takes place post blending, on the final composed output.
+ * If the feature is disabled, the content remains the same without any
+ * sharpening effect; when the feature is applied, it enhances the clarity of the content.
*/
__printf(6, 0)
@@ -940,6 +959,22 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property *prop =
+ drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255);
+
+ if (!prop)
+ return -ENOMEM;
+
+ crtc->sharpness_strength_property = prop;
+ drm_object_attach_property(&crtc->base, prop, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property);
+
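
A minimal userspace sketch (assuming libdrm; not part of this patch) of setting SHARPNESS_STRENGTH on a CRTC, per the property documentation above; the property id is assumed to have been looked up by name beforehand, e.g. via drmModeObjectGetProperties() and drmModeGetProperty():

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_sharpness_strength(int fd, uint32_t crtc_id, uint32_t prop_id,
				  uint64_t strength)
{
	/* strength: 0 disables the filter, 1..255 selects the intensity */
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	ret = drmModeAtomicAddProperty(req, crtc_id, prop_id, strength);
	if (ret >= 0)
		ret = drmModeAtomicCommit(fd, req, 0, NULL); /* no modeset required */

	drmModeAtomicFree(req);
	return ret < 0 ? ret : 0;
}
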
/**
* drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
*
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e58c0c158b3a..84ec79b64960 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
+# FIXME: Disable tracepoints on i915 for PREEMPT_RT, unfortunately
+# it's an all or nothing flag. You cannot selectively disable
+# only some tracepoints.
+subdir-ccflags-$(CONFIG_PREEMPT_RT) += -DNOTRACE
+
subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -26,6 +31,7 @@ i915-y += \
i915_ioctl.o \
i915_irq.o \
i915_mitigations.o \
+ i915_mmio_range.o \
i915_module.o \
i915_params.o \
i915_pci.o \
@@ -228,6 +234,7 @@ i915-y += \
display/intel_bios.o \
display/intel_bo.o \
display/intel_bw.o \
+ display/intel_casf.o \
display/intel_cdclk.o \
display/intel_cmtg.o \
display/intel_color.o \
@@ -236,6 +243,7 @@ i915-y += \
display/intel_crtc.o \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
+ display/intel_dbuf_bw.o \
display/intel_display.o \
display/intel_display_conversion.o \
display/intel_display_driver.o \
@@ -248,6 +256,7 @@ i915-y += \
display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
+ display/intel_display_utils.o \
display/intel_display_wa.o \
display/intel_dmc.o \
display/intel_dmc_wl.o \
@@ -297,9 +306,11 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
+ display/skl_prefill.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o \
+ display/vlv_clock.o \
display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
@@ -346,6 +357,7 @@ i915-y += \
display/intel_gmbus.o \
display/intel_hdmi.o \
display/intel_lspcon.o \
+ display/intel_lt_phy.o \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pfit.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index aa159f9ce12f..a3ff21b2f69f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -11,7 +11,6 @@
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -20,6 +19,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 927fe56aec77..f444c5b7a27b 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -191,45 +191,46 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- /* IPS only exists on ULT machines and is tied to pipe A. */
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!display->params.enable_ips)
- return false;
-
if (crtc_state->pipe_bpp > 24)
return false;
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (display->platform.broadwell &&
- crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
- return false;
-
return true;
}
+static int _hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no IPS specific limits to worry about */
+ return 0;
+}
+
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ int min_cdclk;
- if (!display->platform.broadwell)
+ if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
- if (!hsw_crtc_state_ips_capable(crtc_state))
+ min_cdclk = _hsw_ips_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency,
+ * if that is not enough IPS will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+ return min_cdclk;
}
int hsw_ips_compute_config(struct intel_atomic_state *state,
@@ -244,6 +245,12 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
+ if (_hsw_ips_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ if (!display->params.enable_ips)
+ return 0;
+
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
@@ -257,18 +264,6 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
return 0;
- if (display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
- return 0;
- }
-
crtc_state->ips_enabled = true;
return 0;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 407deb5dfb57..6e39d7f2e0c2 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
@@ -19,6 +18,7 @@
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index fd3b7b35f351..b262319bc83d 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -2295,12 +2295,11 @@ static void i9xx_update_wm(struct intel_display *display)
crtc = single_enabled_crtc(display);
if (display->platform.i915gm && crtc) {
- struct drm_gem_object *obj;
-
- obj = intel_fb_bo(crtc->base.primary->state->fb);
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
/* self-refresh seems busted with untiled */
- if (!intel_bo_is_tiled(obj))
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
crtc = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 37faa8f19f6e..70d4c1bc70fc 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -35,7 +35,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -48,6 +47,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -1655,7 +1655,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- crtc_state->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(crtc_state);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 1addd6288241..68c01932f7b4 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -11,10 +11,10 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index ed7a7ed486b5..6372f533f65b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -49,7 +49,7 @@ void intel_alpm_init(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
- mutex_init(&intel_dp->alpm_parameters.lock);
+ mutex_init(&intel_dp->alpm.lock);
}
static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
@@ -58,43 +58,32 @@ static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
1000 / 1000;
}
-static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
- int *min, int *max)
+static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
{
if (crtc_state->port_clock < 540000) {
*min = 65 * LFPS_CYCLE_COUNT;
*max = 75 * LFPS_CYCLE_COUNT;
- } else if (crtc_state->port_clock <= 810000) {
+ } else {
*min = 140;
*max = 800;
- } else {
- *min = *max = -1;
- return -1;
}
-
- return 0;
}
static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
{
- int tlfps_cycle_min, tlfps_cycle_max, ret;
+ int tlfps_cycle_min, tlfps_cycle_max;
- ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
- &tlfps_cycle_max);
- if (ret)
- return ret;
+ get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
}
static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
{
- int lfps_cycle_time = get_lfps_cycle_time(crtc_state);
-
- if (lfps_cycle_time < 0)
- return -1;
-
- return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT);
+ return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 /
+ 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -133,7 +122,7 @@ static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_s
static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
@@ -146,8 +135,6 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
silence_period = get_silence_period_symbols(crtc_state);
lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
- if (lfps_half_cycle < 0)
- return false;
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
@@ -157,15 +144,15 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
- intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
- intel_dp->alpm_parameters.silence_period_sym_clocks = silence_period;
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+ crtc_state->alpm_state.aux_less_wake_lines = aux_less_wake_lines;
+ crtc_state->alpm_state.silence_period_sym_clocks = silence_period;
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms = lfps_half_cycle;
return true;
}
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
@@ -186,7 +173,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
check_entry_lines = 15;
- intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
+ crtc_state->alpm_state.check_entry_lines = check_entry_lines;
return true;
}
@@ -217,7 +204,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
}
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
@@ -255,8 +242,8 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
+ crtc_state->alpm_state.io_wake_lines = max(io_wake_lines, 7);
+ crtc_state->alpm_state.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -270,12 +257,12 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
- if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ if (intel_dp->alpm.lobf_disable_debug) {
drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
return;
}
- if (intel_dp->alpm_parameters.sink_alpm_error)
+ if (intel_dp->alpm.sink_alpm_error)
return;
if (!intel_dp_is_edp(intel_dp))
@@ -306,9 +293,9 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_vdisplay - context_latency;
first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = intel_dp->alpm_parameters.io_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
else
- waketime_in_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
crtc_state->has_lobf = (context_latency + guardband) >
(first_sdp_position + waketime_in_lines);
@@ -325,7 +312,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
!crtc_state->has_lobf))
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
@@ -334,7 +321,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
- ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
+ ALPM_CTL_AUX_LESS_WAKE_TIME(crtc_state->alpm_state.aux_less_wake_lines);
if (intel_dp->as_sdp_supported) {
u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
@@ -352,7 +339,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(crtc_state->alpm_state.fast_wake_lines);
}
if (crtc_state->has_lobf) {
@@ -360,17 +347,17 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
}
- alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(crtc_state->alpm_state.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
- intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->alpm.transcoder = crtc_state->cpu_transcoder;
}
void intel_alpm_port_configure(struct intel_dp *intel_dp,
@@ -388,14 +375,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks);
+ crtc_state->alpm_state.silence_period_sym_clocks);
lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms);
}
intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
@@ -433,10 +420,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
continue;
if (old_crtc_state->has_lobf) {
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
}
}
@@ -530,7 +517,7 @@ i915_edp_lobf_debug_get(void *data, u64 *val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- *val = intel_dp->alpm_parameters.lobf_disable_debug;
+ *val = intel_dp->alpm.lobf_disable_debug;
return 0;
}
@@ -541,7 +528,7 @@ i915_edp_lobf_debug_set(void *data, u64 val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- intel_dp->alpm_parameters.lobf_disable_debug = val;
+ intel_dp->alpm.lobf_disable_debug = val;
return 0;
}
@@ -569,12 +556,12 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
void intel_alpm_disable(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->alpm.transcoder;
if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
@@ -585,7 +572,7 @@ void intel_alpm_disable(struct intel_dp *intel_dp)
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
drm_dbg_kms(display->drm, "Disabling ALPM\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
bool intel_alpm_get_error(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index a861c20b5d79..53599b464dea 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -17,7 +17,7 @@ struct intel_crtc;
void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3b14f929825a..a68fdbd2acb9 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -13,7 +13,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
@@ -21,6 +20,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3596dce84c28..e2b51fa21d93 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -36,11 +36,11 @@
#include "soc/intel_rom.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index ac6da20d9529..3033c53e61d1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -3,32 +3,23 @@
* Copyright © 2019 Intel Corporation
*/
-#include <drm/drm_atomic_state_helper.h>
-
#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
-#include "intel_atomic.h"
#include "intel_bw.h"
-#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
struct intel_bw_state {
struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask, used to determine, whether correspondent
@@ -836,49 +827,6 @@ void intel_bw_init_hw(struct intel_display *display)
icl_get_bw_info(display, dram_info, &icl_sa_info);
}
-static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
-{
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
-}
-
-static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- unsigned int data_rate = 0;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->data_rate[plane_id];
-
- if (DISPLAY_VER(display) < 11)
- data_rate += crtc_state->data_rate_y[plane_id];
- }
-
- return data_rate;
-}
-
-/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(struct intel_display *display,
- unsigned int data_rate)
-{
- if (DISPLAY_VER(display) < 12)
- return 0;
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
-}
-
static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
@@ -894,14 +842,13 @@ static unsigned int intel_bw_num_active_planes(struct intel_display *display,
static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
+ if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -1262,223 +1209,6 @@ static int intel_bw_check_qgv_points(struct intel_display *display,
old_bw_state, new_bw_state);
}
-static bool intel_dbuf_bw_changed(struct intel_display *display,
- const struct intel_dbuf_bw *old_dbuf_bw,
- const struct intel_dbuf_bw *new_dbuf_bw)
-{
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
- old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
- return true;
- }
-
- return false;
-}
-
-static bool intel_bw_state_changed(struct intel_display *display,
- const struct intel_bw_state *old_bw_state,
- const struct intel_bw_state *new_bw_state)
-{
- enum pipe pipe;
-
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *old_dbuf_bw =
- &old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_dbuf_bw =
- &new_bw_state->dbuf_bw[pipe];
-
- if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
- return true;
-
- if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
- intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
- return true;
- }
-
- return false;
-}
-
-static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- struct intel_crtc *crtc,
- enum plane_id plane_id,
- const struct skl_ddb_entry *ddb,
- unsigned int data_rate)
-{
- struct intel_display *display = to_intel_display(crtc);
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
- enum dbuf_slice slice;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
- dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
- dbuf_bw->active_planes[slice] |= BIT(plane_id);
- }
-}
-
-static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- memset(dbuf_bw, 0, sizeof(*dbuf_bw));
-
- if (!crtc_state->hw.active)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb[plane_id],
- crtc_state->data_rate[plane_id]);
-
- if (DISPLAY_VER(display) < 11)
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb_y[plane_id],
- crtc_state->data_rate[plane_id]);
- }
-}
-
-/* "Maximum Data Buffer Bandwidth" */
-static int
-intel_bw_dbuf_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- unsigned int total_max_bw = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- int num_active_planes = 0;
- unsigned int max_bw = 0;
- enum pipe pipe;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
-
- max_bw = max(dbuf_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
- }
- max_bw *= num_active_planes;
-
- total_max_bw = max(total_max_bw, max_bw);
- }
-
- return DIV_ROUND_UP(total_max_bw, 64);
-}
-
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- enum pipe pipe;
- int min_cdclk;
-
- min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
-
- for_each_pipe(display, pipe)
- min_cdclk = max(min_cdclk,
- intel_bw_crtc_min_cdclk(display,
- bw_state->data_rate[pipe]));
-
- return min_cdclk;
-}
-
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct intel_display *display = to_intel_display(state);
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- int old_min_cdclk, new_min_cdclk;
- struct intel_crtc *crtc;
- int i;
-
- if (DISPLAY_VER(display) < 9)
- return 0;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
-
- skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
- skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
-
- if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
- continue;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
- }
-
- if (!old_bw_state)
- return 0;
-
- if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= old_min_cdclk)
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
- return 0;
-
- drm_dbg_kms(display->drm,
- "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
- *need_cdclk_calc = true;
-
- return 0;
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct intel_display *display = to_intel_display(state);
@@ -1489,13 +1219,13 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
unsigned int old_data_rate =
- intel_bw_crtc_data_rate(old_crtc_state);
+ intel_crtc_bw_data_rate(old_crtc_state);
unsigned int new_data_rate =
- intel_bw_crtc_data_rate(new_crtc_state);
+ intel_crtc_bw_data_rate(new_crtc_state);
unsigned int old_active_planes =
- intel_bw_crtc_num_active_planes(old_crtc_state);
+ intel_crtc_bw_num_active_planes(old_crtc_state);
unsigned int new_active_planes =
- intel_bw_crtc_num_active_planes(new_crtc_state);
+ intel_crtc_bw_num_active_planes(new_crtc_state);
struct intel_bw_state *new_bw_state;
/*
@@ -1527,11 +1257,11 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state;
struct intel_bw_state *new_bw_state;
+ int ret;
- if (DISPLAY_VER(display) < 9)
+ if (!intel_any_crtc_active_changed(state))
return 0;
new_bw_state = intel_atomic_get_bw_state(state);
@@ -1543,13 +1273,9 @@ static int intel_bw_modeset_checks(struct intel_atomic_state *state)
new_bw_state->active_pipes =
intel_calc_active_pipes(state, old_bw_state->active_pipes);
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- int ret;
-
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
return 0;
}
@@ -1599,7 +1325,7 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
bool changed = false;
@@ -1610,11 +1336,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
if (DISPLAY_VER(display) < 9)
return 0;
- if (any_ms) {
- ret = intel_bw_modeset_checks(state);
- if (ret)
- return ret;
- }
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
ret = intel_bw_check_sagv_mask(state);
if (ret)
@@ -1657,9 +1381,9 @@ static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
+ intel_crtc_bw_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
+ intel_crtc_bw_num_active_planes(crtc_state);
drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -1690,8 +1414,6 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
- skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
-
/* initially SAGV has been forced off */
bw_state->pipe_sagv_reject |= BIT(pipe);
}
@@ -1709,7 +1431,6 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index d51f50c9d302..99b447388245 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -28,11 +28,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct intel_display *display);
int intel_bw_init(struct intel_display *display);
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
new file mode 100644
index 000000000000..95339b496f24
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+
+#define MAX_PIXELS_FOR_3_TAP_FILTER (1920 * 1080)
+#define MAX_PIXELS_FOR_5_TAP_FILTER (3840 * 2160)
+
+#define FILTER_COEFF_0_125 125
+#define FILTER_COEFF_0_25 250
+#define FILTER_COEFF_0_5 500
+#define FILTER_COEFF_1_0 1000
+#define FILTER_COEFF_0_0 0
+#define SET_POSITIVE_SIGN(x) ((x) & (~SIGN))
+
+/**
+ * DOC: Content Adaptive Sharpness Filter (CASF)
+ *
+ * Starting from LNL the display engine supports an
+ * adaptive sharpening filter that enhances image
+ * quality. The display hardware uses the second
+ * pipe scaler to implement CASF, so pipe scaling
+ * cannot be used while sharpness is enabled.
+ * The filter operates on a region of pixels whose
+ * extent depends on the tap size. Coefficients are
+ * used to generate an alpha value which blends the
+ * sharpened image with the original image to produce
+ * the final output.
+ */
+
+/* Default LUT values to be loaded one time. */
+static const u16 sharpness_lut[] = {
+ 4095, 2047, 1364, 1022, 816, 678, 579,
+ 504, 444, 397, 357, 323, 293, 268, 244, 224,
+ 204, 187, 170, 154, 139, 125, 111, 98, 85,
+ 73, 60, 48, 36, 24, 12, 0
+};
+
+static const u16 filtercoeff_1[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_0, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_0,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_2[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_3[] = {
+ FILTER_COEFF_0_125, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_125,
+};
+
+static void intel_casf_filter_lut_load(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int i;
+
+ intel_de_write(display, SHRPLUT_INDEX(crtc->pipe),
+ INDEX_AUTO_INCR | INDEX_VALUE(0));
+
+ for (i = 0; i < ARRAY_SIZE(sharpness_lut); i++)
+ intel_de_write(display, SHRPLUT_DATA(crtc->pipe),
+ sharpness_lut[i]);
+}
+
+void intel_casf_update_strength(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int win_size;
+
+ intel_de_rmw(display, SHARPNESS_CTL(crtc->pipe), FILTER_STRENGTH_MASK,
+ FILTER_STRENGTH(crtc_state->hw.casf_params.strength));
+
+ win_size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, 1));
+
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, 1), win_size);
+}
+
+static void intel_casf_compute_win_size(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->hw.adjusted_mode;
+ u32 total_pixels = mode->hdisplay * mode->vdisplay;
+
+ if (total_pixels <= MAX_PIXELS_FOR_3_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_3X3;
+ else if (total_pixels <= MAX_PIXELS_FOR_5_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_5X5;
+ else
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_7X7;
+}
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!HAS_CASF(display))
+ return 0;
+
+ if (crtc_state->uapi.sharpness_strength == 0) {
+ crtc_state->hw.casf_params.casf_enable = false;
+ crtc_state->hw.casf_params.strength = 0;
+ return 0;
+ }
+
+ crtc_state->hw.casf_params.casf_enable = true;
+
+ /*
+ * HW takes a value of the form (1.0 + strength) in 4.4 fixed point.
+ * Strength ranges from 0.0 to 14.9375, i.e. from 0 to 239.
+ * The user can pass a value from 0 to 255, but it is clamped to 239.
+ * Example: the user passes 85, which is 5.3125; adding 1.0 gives 6.3125.
+ * 6.3125 in 4.4 format is 0b01100101, which equals 101.
+ * Equivalently, 85 + 16 = 101.
+ */
+ crtc_state->hw.casf_params.strength =
+ min(crtc_state->uapi.sharpness_strength, 0xEF) + 0x10;
+
+ intel_casf_compute_win_size(crtc_state);
+
+ intel_casf_scaler_compute_config(crtc_state);
+
+ return 0;
+}
+
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharp;
+
+ sharp = intel_de_read(display, SHARPNESS_CTL(crtc->pipe));
+ if (sharp & FILTER_EN) {
+ if (drm_WARN_ON(display->drm,
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp) < 16))
+ crtc_state->hw.casf_params.strength = 0;
+ else
+ crtc_state->hw.casf_params.strength =
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp);
+ crtc_state->hw.casf_params.casf_enable = true;
+ crtc_state->hw.casf_params.win_size =
+ REG_FIELD_GET(FILTER_SIZE_MASK, sharp);
+ }
+}
+
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.casf_params.casf_enable)
+ return true;
+
+ return false;
+}
+
+static int casf_coeff_tap(int i)
+{
+ return i % SCALER_FILTER_NUM_TAPS;
+}
+
+static u32 casf_coeff(struct intel_crtc_state *crtc_state, int t)
+{
+ struct scaler_filter_coeff value;
+ u32 coeff;
+
+ value = crtc_state->hw.casf_params.coeff[t];
+ value.sign = 0;
+
+ coeff = value.sign << 15 | value.exp << 12 | value.mantissa << 3;
+ return coeff;
+}
+
+/*
+ * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
+ * To enable CASF, program the scaler coefficients with the coefficients
+ * that are calculated and stored in hw.casf_params.coeff as per
+ * SCALER_COEFFICIENT_FORMAT.
+ */
+static void intel_casf_write_coeff(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int id = crtc_state->scaler_state.scaler_id;
+ int i;
+
+ if (id != 1) {
+ drm_WARN(display->drm, 1, "Second scaler not enabled\n");
+ return;
+ }
+
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(crtc->pipe, id, 0),
+ PS_COEF_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * SCALER_FILTER_NUM_TAPS; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = casf_coeff_tap(i);
+ tmp = casf_coeff(crtc_state, t);
+
+ t = casf_coeff_tap(i + 1);
+ tmp |= casf_coeff(crtc_state, t) << 16;
+
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(crtc->pipe, id, 0),
+ tmp);
+ }
+}
+
+static void convert_sharpness_coef_binary(struct scaler_filter_coeff *coeff,
+ u16 coefficient)
+{
+ if (coefficient < 25) {
+ coeff->mantissa = (coefficient * 2048) / 100;
+ coeff->exp = 3;
+ } else if (coefficient < 50) {
+ coeff->mantissa = (coefficient * 1024) / 100;
+ coeff->exp = 2;
+ } else if (coefficient < 100) {
+ coeff->mantissa = (coefficient * 512) / 100;
+ coeff->exp = 1;
+ } else {
+ coeff->mantissa = (coefficient * 256) / 100;
+ coeff->exp = 0;
+ }
+}
+
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state)
+{
+ const u16 *filtercoeff;
+ u16 filter_coeff[SCALER_FILTER_NUM_TAPS];
+ u16 sumcoeff = 0;
+ int i;
+
+ if (crtc_state->hw.casf_params.win_size == 0)
+ filtercoeff = filtercoeff_1;
+ else if (crtc_state->hw.casf_params.win_size == 1)
+ filtercoeff = filtercoeff_2;
+ else
+ filtercoeff = filtercoeff_3;
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++)
+ sumcoeff += *(filtercoeff + i);
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) {
+ filter_coeff[i] = (*(filtercoeff + i) * 100 / sumcoeff);
+ convert_sharpness_coef_binary(&crtc_state->hw.casf_params.coeff[i],
+ filter_coeff[i]);
+ }
+}
+
+void intel_casf_enable(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharpness_ctl;
+
+ intel_casf_filter_lut_load(crtc, crtc_state);
+
+ intel_casf_write_coeff(crtc_state);
+
+ sharpness_ctl = FILTER_EN | FILTER_STRENGTH(crtc_state->hw.casf_params.strength);
+
+ sharpness_ctl |= crtc_state->hw.casf_params.win_size;
+
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), sharpness_ctl);
+
+ skl_scaler_setup_casf(crtc_state);
+}
+
+void intel_casf_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_de_write(display, SKL_PS_CTRL(crtc->pipe, 1), 0);
+ intel_de_write(display, SKL_PS_WIN_POS(crtc->pipe, 1), 0);
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), 0);
+ intel_de_write(display, SKL_PS_WIN_SZ(crtc->pipe, 1), 0);
+}
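
To make the fixed-point math above concrete, here is a standalone sketch (not
part of the patch; the helper names casf_strength_to_hw() and
coeff_to_mant_exp() are illustrative only) mirroring the strength encoding in
intel_casf_compute_config() and the coefficient split in
convert_sharpness_coef_binary():

/*
 * Editor's sketch, not driver code: standalone arithmetic mirroring the
 * 4.4 fixed-point strength encoding and the mantissa/exponent coefficient
 * conversion shown in intel_casf.c above.
 */
#include <stdio.h>

/* user strength 0-255 -> HW 4.4 fixed-point value (1.0 + strength), clamped */
static unsigned int casf_strength_to_hw(unsigned int user)
{
	if (user > 0xEF)
		user = 0xEF;	/* clamp to 14.9375 */
	return user + 0x10;	/* add the 1.0 bias (16 in 4.4 fixed point) */
}

/* normalized percent coefficient (0-100) -> mantissa/exponent pair */
static void coeff_to_mant_exp(unsigned int pct,
			      unsigned int *mant, unsigned int *exp)
{
	if (pct < 25) {
		*mant = pct * 2048 / 100;
		*exp = 3;
	} else if (pct < 50) {
		*mant = pct * 1024 / 100;
		*exp = 2;
	} else if (pct < 100) {
		*mant = pct * 512 / 100;
		*exp = 1;
	} else {
		*mant = pct * 256 / 100;
		*exp = 0;
	}
}

int main(void)
{
	unsigned int mant, exp;

	/* matches the worked example in the comment: 85 -> 101 (0x65) */
	printf("strength 85 -> %u\n", casf_strength_to_hw(85));

	/* 40 is the normalized centre tap of filtercoeff_2 (1000 / 2500) */
	coeff_to_mant_exp(40, &mant, &exp);
	printf("coeff 40%% -> mantissa %u, exp %u\n", mant, exp);
	return 0;
}
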
diff --git a/drivers/gpu/drm/i915/display/intel_casf.h b/drivers/gpu/drm/i915/display/intel_casf.h
new file mode 100644
index 000000000000..b3fb0bcb3f5b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_H__
+#define __INTEL_CASF_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state);
+void intel_casf_update_strength(struct intel_crtc_state *new_crtc_state);
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state);
+void intel_casf_enable(struct intel_crtc_state *crtc_state);
+void intel_casf_disable(const struct intel_crtc_state *crtc_state);
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state);
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_CASF_H__ */
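
The window size selection in intel_casf_compute_win_size() above picks the tap
count from the pipe source size. A standalone restatement with the same
thresholds follows (the enum is an illustrative stand-in for the
SHARPNESS_FILTER_SIZE_* register values, which are likewise 0, 1 and 2):

/* Editor's sketch of the intel_casf_compute_win_size() thresholds. */
#include <stdio.h>

enum win_size { WIN_3X3, WIN_5X5, WIN_7X7 };

static enum win_size casf_win_size(long hdisplay, long vdisplay)
{
	long total = hdisplay * vdisplay;

	if (total <= 1920 * 1080)		/* up to FHD */
		return WIN_3X3;
	else if (total <= 3840 * 2160)		/* up to 4k */
		return WIN_5X5;
	else
		return WIN_7X7;
}

int main(void)
{
	printf("2560x1440 -> %d (5x5)\n", casf_win_size(2560, 1440));
	printf("5120x2880 -> %d (7x7)\n", casf_win_size(5120, 2880));
	return 0;
}
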
diff --git a/drivers/gpu/drm/i915/display/intel_casf_regs.h b/drivers/gpu/drm/i915/display/intel_casf_regs.h
new file mode 100644
index 000000000000..87803cca510f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_REGS_H__
+#define __INTEL_CASF_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _SHARPNESS_CTL_A 0x682B0
+#define _SHARPNESS_CTL_B 0x68AB0
+#define SHARPNESS_CTL(pipe) _MMIO_PIPE(pipe, _SHARPNESS_CTL_A, _SHARPNESS_CTL_B)
+#define FILTER_EN REG_BIT(31)
+#define FILTER_STRENGTH_MASK REG_GENMASK(15, 8)
+#define FILTER_STRENGTH(x) REG_FIELD_PREP(FILTER_STRENGTH_MASK, (x))
+#define FILTER_SIZE_MASK REG_GENMASK(1, 0)
+#define SHARPNESS_FILTER_SIZE_3X3 REG_FIELD_PREP(FILTER_SIZE_MASK, 0)
+#define SHARPNESS_FILTER_SIZE_5X5 REG_FIELD_PREP(FILTER_SIZE_MASK, 1)
+#define SHARPNESS_FILTER_SIZE_7X7 REG_FIELD_PREP(FILTER_SIZE_MASK, 2)
+
+#define _SHRPLUT_DATA_A 0x682B8
+#define _SHRPLUT_DATA_B 0x68AB8
+#define SHRPLUT_DATA(pipe) _MMIO_PIPE(pipe, _SHRPLUT_DATA_A, _SHRPLUT_DATA_B)
+
+#define _SHRPLUT_INDEX_A 0x682B4
+#define _SHRPLUT_INDEX_B 0x68AB4
+#define SHRPLUT_INDEX(pipe) _MMIO_PIPE(pipe, _SHRPLUT_INDEX_A, _SHRPLUT_INDEX_B)
+#define INDEX_AUTO_INCR REG_BIT(10)
+#define INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define INDEX_VALUE(x) REG_FIELD_PREP(INDEX_VALUE_MASK, (x))
+
+#endif /* __INTEL_CASF_REGS_H__ */
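
For reference, the SHARPNESS_CTL layout declared above packs the enable bit,
strength and filter size as follows; a tiny standalone sketch with plain shifts
(illustrative macros, not the driver's REG_* helpers):

/*
 * Editor's sketch: bit 31 = FILTER_EN, bits 15:8 = strength (4.4 fixed
 * point), bits 1:0 = filter size, mirroring the register definitions above.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_FILTER_EN		(1u << 31)
#define EX_STRENGTH(x)		(((uint32_t)(x) & 0xff) << 8)
#define EX_SIZE_5X5		1u	/* bits 1:0 */

int main(void)
{
	/* strength 101 (user 85 plus the 1.0 bias), 5x5 filter */
	uint32_t ctl = EX_FILTER_EN | EX_STRENGTH(101) | EX_SIZE_5X5;

	printf("SHARPNESS_CTL = 0x%08x\n", ctl);	/* 0x80006501 */
	return 0;
}
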
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 9725eebe5706..50dca70a0665 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -32,15 +32,15 @@
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
-#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
@@ -49,6 +49,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_clock.h"
#include "vlv_dsi.h"
#include "vlv_sideband.h"
@@ -132,8 +133,8 @@ struct intel_cdclk_state {
*/
struct intel_cdclk_config actual;
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
+ /* minimum acceptable cdclk to satisfy DBUF bandwidth requirements */
+ int dbuf_bw_min_cdclk;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -145,6 +146,9 @@ struct intel_cdclk_state {
/* forced minimum cdclk for glk+ audio w/a */
int force_min_cdclk;
+ /* bitmask of enabled pipes */
+ u8 enabled_pipes;
+
/* bitmask of active pipes */
u8 active_pipes;
@@ -563,8 +567,7 @@ static void hsw_get_cdclk(struct intel_display *display,
static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ?
333333 : 320000;
/*
@@ -584,8 +587,6 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.valleyview) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
@@ -599,7 +600,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1;
}
}
@@ -608,17 +609,12 @@ static void vlv_get_cdclk(struct intel_display *display,
{
u32 val;
- vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
- cdclk_config->vco = vlv_get_hpll_vco(display->drm);
- cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_config->vco);
+ cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm);
+ vlv_punit_get(display->drm);
val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
-
- vlv_iosf_sb_put(display->drm,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_punit_put(display->drm);
if (display->platform.valleyview)
cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -630,7 +626,6 @@ static void vlv_get_cdclk(struct intel_display *display,
static void vlv_program_pfi_credits(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
if (display->platform.cherryview)
@@ -638,7 +633,7 @@ static void vlv_program_pfi_credits(struct intel_display *display)
else
default_credits = PFI_CREDIT(8);
- if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) {
/* CHV suggested value is 31 or 63 */
if (display->platform.cherryview)
credits = PFI_CREDIT_63;
@@ -670,7 +665,6 @@ static void vlv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -715,7 +709,7 @@ static void vlv_set_cdclk(struct intel_display *display,
if (cdclk == 400000) {
u32 divider;
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1,
cdclk) - 1;
/* adjust cdclk divider */
@@ -1567,7 +1561,7 @@ static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk)
drm_WARN(display->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
min_cdclk, display->cdclk.hw.ref);
- return 0;
+ return display->cdclk.max_cdclk_freq;
}
static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk)
@@ -2600,6 +2594,12 @@ static void intel_set_cdclk(struct intel_display *display,
}
}
+static int dg2_power_well_count(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.dg2 ? hweight8(cdclk_state->active_pipes) : 0;
+}
+
static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -2612,16 +2612,16 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual) &&
- new_cdclk_state->active_pipes ==
- old_cdclk_state->active_pipes)
+ dg2_power_well_count(display, old_cdclk_state) ==
+ dg2_power_well_count(display, new_cdclk_state))
return;
/* According to "Sequence Before Frequency Change", voltage level set to 0x3 */
voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX;
change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) >
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) >
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence Before Frequency Change",
@@ -2639,7 +2639,7 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
* no action if it is decreasing, before the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
change_cdclk, update_pipe_count);
@@ -2659,8 +2659,8 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
voltage_level = new_cdclk_state->actual.voltage_level;
update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) <
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) <
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence After Frequency Change",
@@ -2676,7 +2676,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
* no action if it is increasing, after the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
update_cdclk, update_pipe_count);
@@ -2711,6 +2711,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2763,6 +2766,9 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2800,16 +2806,20 @@ static int intel_cdclk_guardband(struct intel_display *display)
return 90;
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+static int _intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state, int pixel_rate)
{
struct intel_display *display = to_intel_display(crtc_state);
int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
int guardband = intel_cdclk_guardband(display);
- int pixel_rate = crtc_state->pixel_rate;
return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ return _intel_pixel_rate_to_cdclk(crtc_state, crtc_state->pixel_rate);
+}
+
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2818,12 +2828,12 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int min_cdclk = 0;
for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
+ min_cdclk = max(min_cdclk, crtc_state->plane_min_cdclk[plane->id]);
return min_cdclk;
}
-static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -2831,6 +2841,8 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
+ min_cdclk = max(min_cdclk, intel_crtc_bw_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_fbc_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
@@ -2840,51 +2852,110 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_cdclk_update_crtc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
{
struct intel_display *display = to_intel_display(state);
- struct intel_cdclk_state *cdclk_state =
- intel_atomic_get_new_cdclk_state(state);
- const struct intel_bw_state *bw_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- int min_cdclk, i;
- enum pipe pipe;
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (min_cdclk < 0)
- return min_cdclk;
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
- continue;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ old_min_cdclk = cdclk_state->min_cdclk[crtc->pipe];
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- bw_state = intel_atomic_get_new_bw_state(state);
- if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->bw_min_cdclk != min_cdclk) {
- int ret;
+ cdclk_state->min_cdclk[crtc->pipe] = new_min_cdclk;
- cdclk_state->bw_min_cdclk = min_cdclk;
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
- }
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] min cdclk: %d kHz -> %d kHz\n",
+ crtc->base.base.id, crtc->base.name,
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
- min_cdclk = max(cdclk_state->force_min_cdclk,
- cdclk_state->bw_min_cdclk);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ old_min_cdclk = cdclk_state->dbuf_bw_min_cdclk;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state->dbuf_bw_min_cdclk = new_min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "dbuf bandwidth min cdclk: %d kHz -> %d kHz\n",
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
+
+static bool glk_cdclk_audio_wa_needed(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.geminilake &&
+ cdclk_state->enabled_pipes &&
+ !is_power_of_2(cdclk_state->enabled_pipes);
+}
+
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe;
+ int min_cdclk;
+
+ min_cdclk = cdclk_state->force_min_cdclk;
+ min_cdclk = max(min_cdclk, cdclk_state->dbuf_bw_min_cdclk);
for_each_pipe(display, pipe)
min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
@@ -2896,8 +2967,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
- if (display->platform.geminilake && cdclk_state->active_pipes &&
- !is_power_of_2(cdclk_state->active_pipes))
+ if (glk_cdclk_audio_wa_needed(display, cdclk_state))
min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
@@ -3183,41 +3253,69 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
return to_intel_cdclk_state(cdclk_state);
}
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_cdclk_modeset_checks(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
+ struct intel_cdclk_state *new_cdclk_state;
int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially affected
- * planes are part of the state. We can now compute the minimum cdclk
- * for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
+ if (!intel_any_crtc_enable_changed(state) &&
+ !intel_any_crtc_active_changed(state))
+ return 0;
- ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->enabled_pipes =
+ intel_calc_enabled_pipes(state, old_cdclk_state->enabled_pipes);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!old_cdclk_state->active_pipes != !new_cdclk_state->active_pipes)
+ *need_cdclk_calc = true;
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ if (glk_cdclk_audio_wa_needed(display, old_cdclk_state) !=
+ glk_cdclk_audio_wa_needed(display, new_cdclk_state))
+ *need_cdclk_calc = true;
+
+ if (dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state))
*need_cdclk_calc = true;
return 0;
}
+static int intel_crtcs_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = intel_cdclk_update_crtc_min_cdclk(state, crtc,
+ old_crtc_state->min_cdclk,
+ new_crtc_state->min_cdclk,
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
{
struct intel_cdclk_state *cdclk_state;
@@ -3250,18 +3348,17 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
- bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
- hweight8(new_cdclk_state->active_pipes);
- bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
- &new_cdclk_state->actual);
/*
- * We need to poke hw for gen >= 12, because we notify PCode if
+ * We need to poke hw for DG2, because we notify PCode if
* pipe power well count changes.
*/
- return cdclk_changed || (display->platform.dg2 && power_well_cnt_changed);
+ return intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual) ||
+ dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state);
}
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
@@ -3275,9 +3372,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state->active_pipes =
- intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
-
ret = intel_cdclk_modeset_calc_cdclk(state);
if (ret)
return ret;
@@ -3290,9 +3384,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
- intel_cdclk_changed(&old_cdclk_state->logical,
+ } else if (intel_cdclk_changed(&old_cdclk_state->logical,
&new_cdclk_state->logical)) {
ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
@@ -3374,14 +3466,55 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+int intel_cdclk_atomic_check(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
+ bool need_cdclk_calc = false;
+ int ret;
+
+ ret = intel_cdclk_modeset_checks(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_crtcs_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_dbuf_bw_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+
+ need_cdclk_calc = true;
+ }
+
+ if (need_cdclk_calc) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void intel_cdclk_update_hw_state(struct intel_display *display)
{
- const struct intel_bw_state *bw_state =
- to_intel_bw_state(display->bw.obj.state);
+ const struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
+ cdclk_state->enabled_pipes = 0;
cdclk_state->active_pipes = 0;
for_each_intel_crtc(display->drm, crtc) {
@@ -3389,14 +3522,16 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
+ if (crtc_state->hw.enable)
+ cdclk_state->enabled_pipes |= BIT(pipe);
if (crtc_state->hw.active)
cdclk_state->active_pipes |= BIT(pipe);
- cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
+ cdclk_state->min_cdclk[pipe] = crtc_state->min_cdclk;
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
- cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ cdclk_state->dbuf_bw_min_cdclk = intel_dbuf_bw_min_cdclk(display, dbuf_bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3565,13 +3700,6 @@ static int pch_rawclk(struct intel_display *display)
return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-static int vlv_hrawclk(struct intel_display *display)
-{
- /* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
- CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
static int i9xx_hrawclk(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
@@ -3605,7 +3733,7 @@ u32 intel_read_rawclk(struct intel_display *display)
else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
- freq = vlv_hrawclk(display);
+ freq = vlv_clock_get_hrawclk(display->drm);
else if (DISPLAY_VER(display) >= 3)
freq = i9xx_hrawclk(display);
else
@@ -3897,11 +4025,6 @@ int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe
return cdclk_state->min_cdclk[pipe];
}
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
-{
- return cdclk_state->bw_min_cdclk;
-}
-
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
{
const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
@@ -3933,3 +4056,75 @@ void intel_cdclk_read_hw(struct intel_display *display)
cdclk_state->actual = display->cdclk.hw;
cdclk_state->logical = display->cdclk.hw;
}
+
+static int calc_cdclk(const struct intel_crtc_state *crtc_state, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton) {
+ return bxt_calc_cdclk(display, min_cdclk);
+ } else if (DISPLAY_VER(display) == 9) {
+ int vco;
+
+ vco = display->cdclk.skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+
+ return skl_calc_cdclk(min_cdclk, vco);
+ } else if (display->platform.broadwell) {
+ return bdw_calc_cdclk(min_cdclk);
+ } else if (display->platform.cherryview || display->platform.valleyview) {
+ return vlv_calc_cdclk(display, min_cdclk);
+ } else {
+ return display->cdclk.max_cdclk_freq;
+ }
+}
+
+static unsigned int _intel_cdclk_prefill_adj(const struct intel_crtc_state *crtc_state,
+ int clock, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int cdclk = calc_cdclk(crtc_state, min_cdclk);
+
+ return min(0x10000, DIV_ROUND_UP_ULL((u64)clock << 16, ppc * cdclk));
+}
+
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /* FIXME use the actual min_cdclk for the pipe here */
+ return intel_cdclk_prefill_adjustment_worst(crtc_state);
+}
+
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ int clock = crtc_state->hw.pipe_mode.crtc_clock;
+ int min_cdclk;
+
+ /*
+ * FIXME could perhaps consider a few more of the factors
+ * that go into the per-crtc min_cdclk. Namely anything that
+ * only changes during full modesets.
+ *
+ * FIXME this assumes 1:1 scaling, but the other _worst() stuff
+ * assumes max downscaling, so the final result will be
+ * unrealistically bad. Figure out where the actual maximum value
+ * lies and use that to compute a more realistic worst case
+ * estimate...
+ */
+ min_cdclk = _intel_pixel_rate_to_cdclk(crtc_state, clock);
+
+ return _intel_cdclk_prefill_adj(crtc_state, clock, min_cdclk);
+}
+
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, prefill_lines_unadjusted),
+ ppc * prefill_lines_available);
+}
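
To make the new prefill helpers above concrete, here is standalone arithmetic
(not driver code; the cdclk is taken as a plain parameter instead of being
derived via calc_cdclk(), and all numbers are made up for the example):

/*
 * Editor's sketch: the prefill adjustment is a 16.16 fixed-point ratio of
 * pixel clock to cdclk throughput (ppc pixels per cdclk cycle), capped at
 * 1.0, and the min-cdclk-for-prefill helper solves the same relation for
 * cdclk.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t prefill_adj(uint32_t clock_khz, uint32_t ppc, uint32_t cdclk_khz)
{
	uint64_t num = (uint64_t)clock_khz << 16;
	uint64_t den = (uint64_t)ppc * cdclk_khz;
	uint64_t adj = (num + den - 1) / den;	/* DIV_ROUND_UP */

	return adj > 0x10000 ? 0x10000 : (uint32_t)adj;
}

static uint32_t min_cdclk_for_prefill(uint32_t clock_khz,
				      uint32_t prefill_lines_unadjusted,
				      uint32_t prefill_lines_available,
				      uint32_t ppc)
{
	uint64_t num = (uint64_t)clock_khz * prefill_lines_unadjusted;
	uint64_t den = (uint64_t)ppc * prefill_lines_available;

	return (uint32_t)((num + den - 1) / den);	/* DIV_ROUND_UP */
}

int main(void)
{
	/* hypothetical: ~4k60 pixel clock, 2 pixels per cdclk, cdclk 480 MHz */
	printf("adj = 0x%x / 0x10000\n", prefill_adj(594000, 2, 480000));

	/* hypothetical: 40 unadjusted prefill lines, 30 lines available */
	printf("min cdclk = %u kHz\n",
	       min_cdclk_for_prefill(594000, 40, 30, 2));
	return 0;
}
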
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cacee598af0e..1ff7d078b42c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -38,16 +38,17 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context);
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state);
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
void intel_cdclk_update_hw_state(struct intel_display *display);
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
@@ -64,9 +65,16 @@ int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
void intel_cdclk_read_hw(struct intel_display *display);
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available);
+
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_CDCLK_H__ */
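
The new min cdclk bookkeeping above only lets a per-pipe or dbuf-bandwidth
minimum drop when the commit already performs a modeset; increases always take
effect. A pure-function restatement of that rule (illustrative only, assuming
the same semantics as intel_cdclk_update_crtc_min_cdclk() and
intel_cdclk_update_dbuf_bw_min_cdclk()):

/* Editor's sketch of the "only decrease across a modeset" rule. */
#include <stdbool.h>
#include <stdio.h>

static int updated_min_cdclk(int old_min, int new_min, bool any_modeset)
{
	if (new_min == old_min)
		return old_min;
	if (new_min < old_min && !any_modeset)
		return old_min;	/* defer the decrease to a later modeset */
	return new_min;
}

int main(void)
{
	printf("%d\n", updated_min_cdclk(300000, 450000, false));	/* 450000: bump */
	printf("%d\n", updated_min_cdclk(450000, 300000, false));	/* 450000: deferred */
	printf("%d\n", updated_min_cdclk(450000, 300000, true));	/* 300000: modeset */
	return 0;
}
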
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 671db6926e4c..1e97020e7304 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -24,12 +24,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsb.h"
#include "intel_vrr.h"
@@ -2013,7 +2013,7 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_dsb_wait_for_delayed_vblank(state, crtc_state->dsb_color);
intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
intel_dsb_interrupt(crtc_state->dsb_color);
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 112749f97c26..f401558ac14e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define for_each_combo_phy(__display, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6a55854db5b6..647e6e1f8c68 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,7 +31,7 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
-#include "i915_utils.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index a187db6df2d3..802ae5aaece1 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -84,8 +84,13 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!crtc->active)
return 0;
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ if (!vblank->max_vblank_count) {
+ /* On PREEMPT_RT we cannot take the vblank spinlock since this function is called from tracepoints */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return (u32)drm_crtc_vblank_count(&crtc->base);
+ else
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ }
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
@@ -390,6 +395,9 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+ if (HAS_CASF(display))
+ drm_crtc_create_sharpness_strength_property(&crtc->base);
+
return 0;
fail:
@@ -748,3 +756,89 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
out:
intel_psr_unlock(new_crtc_state);
}
+
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.enable != new_crtc_state->hw.enable;
+}
+
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.active != new_crtc_state->hw.active;
+}
+
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_active_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(display) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+/* "Maximum Pipe Read Bandwidth" */
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512);
+}
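
The "Maximum Pipe Read Bandwidth" limit added above (display version 12+) is
min_cdclk = DIV_ROUND_UP(data_rate * 10, 512), i.e. cdclk must be at least
data_rate / 51.2. A standalone worked example (the data rate below is
hypothetical, derived from a 4k60 plane at 4 bytes per pixel with the pixel
rate in kHz):

/* Editor's sketch of the intel_crtc_bw_min_cdclk() arithmetic. */
#include <stdint.h>
#include <stdio.h>

static uint32_t bw_min_cdclk(uint64_t data_rate)
{
	uint64_t num = data_rate * 10;

	return (uint32_t)((num + 511) / 512);	/* DIV_ROUND_UP */
}

int main(void)
{
	/* hypothetical aggregate plane data rate: 3840 * 2160 * 4 * 60 / 1000 */
	uint64_t data_rate = 3840ull * 2160 * 4 * 60 / 1000;

	printf("min cdclk = %u\n", bw_min_cdclk(data_rate));	/* 38880 */
	return 0;
}
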
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index 8c14ff8b391e..07917e8a9ae3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -58,4 +58,15 @@ void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe);
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state);
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state);
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state);
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state);
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 0c7f91046996..c2a6217c2262 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -289,10 +289,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "scanline offset: %d\n",
intel_crtc_scanline_offset(pipe_config));
- drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->hw.adjusted_mode.crtc_vblank_start -
- pipe_config->hw.adjusted_mode.crtc_vdisplay,
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d, set context latency: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay,
+ pipe_config->set_context_latency);
drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
@@ -313,9 +312,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
- drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d, min cdclk %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
+ pipe_config->pixel_rate, pipe_config->min_cdclk);
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
@@ -373,6 +372,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_vdsc_state_dump(&p, 0, pipe_config);
+ drm_printf(&p, "sharpness strength: %d, sharpness tap size: %d, sharpness enable: %d\n",
+ pipe_config->hw.casf_params.strength,
+ pipe_config->hw.casf_params.win_size,
+ pipe_config->hw.casf_params.casf_enable);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d4d181f9dca5..7aa14348aa6d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -12,13 +12,13 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
@@ -662,7 +662,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (width != height)
+ if (DISPLAY_VER(display) < 14 && width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
base = plane_state->surf;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 801235a5bc0a..b3b506d0e040 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -16,16 +15,15 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
-#define MB_WRITE_COMMITTED true
-#define MB_WRITE_UNCOMMITTED false
-
#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
for ((__lane) = 0; (__lane) < 2; (__lane)++) \
for_each_if((__lane_mask) & BIT(__lane))
@@ -39,14 +37,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- /* PTL doesn't have a PHY connected to PORT B; as such,
- * there will never be a case where PTL uses PHY B.
- * WCL uses PORT A and B with the C10 PHY.
- * Reusing the condition for WCL and extending it for PORT B
- * should not cause any issues for PTL.
- */
- if (display->platform.pantherlake && phy < PHY_C)
- return true;
+ if (display->platform.pantherlake) {
+ if (display->platform.pantherlake_wildcatlake)
+ return phy <= PHY_B;
+ else
+ return phy == PHY_A;
+ }
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
@@ -130,8 +126,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -140,7 +136,7 @@ static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -161,8 +157,8 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
- int command, int lane, u32 *val)
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -273,8 +269,7 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder,
return 0;
}
-static u8 intel_cx0_read(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr)
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
@@ -361,8 +356,8 @@ static void __intel_cx0_write(struct intel_encoder *encoder,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 data, bool committed)
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
@@ -414,8 +409,8 @@ static void __intel_cx0_rmw(struct intel_encoder *encoder,
__intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
@@ -2105,6 +2100,9 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state);
+
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
@@ -2129,6 +2127,8 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
+
+ pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
}
static void intel_c10_pll_program(struct intel_display *display,
@@ -2587,20 +2587,6 @@ static bool is_dp2(u32 clock)
return false;
}
-static bool is_hdmi_frl(u32 clock)
-{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
-}
-
static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -2614,7 +2600,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
{
if (dp && is_dp2(clock))
return 2;
- else if (is_hdmi_frl(clock))
+ else if (intel_hdmi_is_frl(clock))
return 1;
else
return 0;
@@ -2626,11 +2612,13 @@ static void intel_c20_pll_program(struct intel_display *display,
bool is_dp, int port_clock)
{
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+ u8 serdes;
bool cntx;
int i;
/* 1. Read current context selection */
- cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) &
+ PHY_C20_CONTEXT_TOGGLE;
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2700,28 +2688,31 @@ static void intel_c20_pll_program(struct intel_display *display,
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
- if (is_dp) {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)),
- MB_WRITE_COMMITTED);
- } else {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(port_clock) ? BIT(7) : 0,
- MB_WRITE_COMMITTED);
+ serdes = 0;
+ if (is_dp)
+ serdes = PHY_C20_IS_DP |
+ PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock));
+ else if (intel_hdmi_is_frl(port_clock))
+ serdes = PHY_C20_IS_HDMI_FRL;
- intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(port_clock),
- MB_WRITE_COMMITTED);
- }
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL,
+ serdes,
+ MB_WRITE_COMMITTED);
+
+ if (!is_dp)
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ PHY_C20_HDMI_RATE_MASK,
+ intel_c20_get_hdmi_rate(port_clock),
+ MB_WRITE_COMMITTED);
/*
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
+ PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE,
+ MB_WRITE_COMMITTED);
}
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
@@ -2768,7 +2759,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (!is_dp && is_hdmi_frl(port_clock))
+ if (!is_dp && intel_hdmi_is_frl(port_clock))
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
@@ -2808,8 +2799,8 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
- u8 lane_mask, u8 state)
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2839,24 +2830,24 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
/* Update Timeout Value */
if (intel_de_wait_custom(display, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 2, NULL))
drm_warn(display->drm,
"PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
-static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
XELPDP_POWER_STATE_READY_MASK,
- XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+ XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
- XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+ XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) |
XELPDP_PLL_LANE_STAGGERING_DELAY(0));
}
@@ -2929,7 +2920,7 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_RESET);
+ XELPDP_P2_STATE_RESET);
intel_cx0_setup_powerdown(encoder);
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
@@ -3034,7 +3025,7 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder,
* TODO: For DP alt mode use only one lane.
*/
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_READY);
+ XELPDP_P2_STATE_READY);
/*
* 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
@@ -3160,8 +3151,8 @@ static int intel_mtl_tbt_clock_select(struct intel_display *display,
}
}
-static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3275,13 +3266,13 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
if (intel_encoder_is_c10phy(encoder))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
if ((display->platform.battlemage && encoder->port == PORT_A) ||
(DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
- return CX0_P4PG_STATE_DISABLE;
+ return XELPDP_P4PG_STATE_DISABLE;
}
static void intel_cx0pll_disable(struct intel_encoder *encoder)
@@ -3345,7 +3336,7 @@ static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder)
intel_cx0_get_pclk_pll_request(lane);
}
-static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3584,7 +3575,7 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- if (DISPLAY_VER(display) < 14)
+ if (!IS_DISPLAY_VER(display, 14, 30))
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c5a7b529955b..84d334b865f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define MB_WRITE_COMMITTED true
+#define MB_WRITE_UNCOMMITTED false
+
enum icl_port_dpll_id;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -19,6 +22,8 @@ struct intel_display;
struct intel_encoder;
struct intel_hdmi;
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
@@ -41,9 +46,25 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state);
+int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder);
+bool intel_cx0_is_hdmi_frl(u32 clock);
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr);
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed);
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed);
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val);
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 77eae1d845f7..635b35669348 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -50,6 +50,7 @@
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
#define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define XELPDP_PORT_P2P_TRANSACTION_PENDING REG_BIT(24)
#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16)
#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
@@ -104,6 +105,8 @@
#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1)
#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2)
#define XELPDP_PORT_REVERSAL REG_BIT(16)
+#define XE3PLPDP_PHY_MODE_MASK REG_GENMASK(15, 12)
+#define XE3PLPDP_PHY_MODE_DP REG_FIELD_PREP(XE3PLPDP_PHY_MODE_MASK, 0x3)
#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11)
#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7)
#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6)
@@ -124,6 +127,7 @@
_XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
+#define XE3PLPDP_LANE_PHY_PULSE_STATUS(lane) _PICK(lane, REG_BIT(27), REG_BIT(26))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20)
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
@@ -149,11 +153,12 @@
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
-#define CX0_P0_STATE_ACTIVE 0x0
-#define CX0_P2_STATE_READY 0x2
-#define CX0_P2PG_STATE_DISABLE 0x9
-#define CX0_P4PG_STATE_DISABLE 0xC
-#define CX0_P2_STATE_RESET 0x2
+#define XELPDP_P0_STATE_ACTIVE 0x0
+#define XELPDP_P2_STATE_READY 0x2
+#define XE3PLPD_P4_STATE_DISABLE 0x4
+#define XELPDP_P2PG_STATE_DISABLE 0x9
+#define XELPDP_P4PG_STATE_DISABLE 0xC
+#define XELPDP_P2_STATE_RESET 0x2
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_A 0x640d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
@@ -298,10 +303,14 @@
#define PHY_C20_RD_DATA_L 0xC08
#define PHY_C20_RD_DATA_H 0xC09
#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00
-#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_IS_HDMI_FRL REG_BIT8(7)
+#define PHY_C20_IS_DP REG_BIT8(6)
+#define PHY_C20_DP_RATE_MASK REG_GENMASK8(4, 1)
+#define PHY_C20_DP_RATE(val) REG_FIELD_PREP8(PHY_C20_DP_RATE_MASK, val)
#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0)
-#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1)
-#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val)
+#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_HDMI_RATE_MASK REG_GENMASK8(1, 0)
+#define PHY_C20_HDMI_RATE(val) REG_FIELD_PREP8(PHY_C20_HDMI_RATE_MASK, val)
#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02
#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val)
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.c b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
new file mode 100644
index 000000000000..8b8894c37f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_dbuf_bw.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "skl_watermark.h"
+
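+/* Per-pipe tracking of the peak plane data rate and the active plane mask for each dbuf slice */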
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_dbuf_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+};
+
+struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_dbuf_bw_state, base);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj);
+ if (IS_ERR(dbuf_bw_state))
+ return ERR_CAST(dbuf_bw_state);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dbuf_bw_state_changed(struct intel_display *display,
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state,
+ const struct intel_dbuf_bw_state *new_dbuf_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
+ &old_dbuf_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_dbuf_bw =
+ &new_dbuf_bw_state->dbuf_bw[pipe];
+
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
+
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
+ }
+}
+
+/* "Maximum Data Buffer Bandwidth" */
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe];
+
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
+ }
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
+ }
+
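+ /* The dbuf is assumed to transfer up to 64 bytes per CDCLK cycle, hence bw / 64 */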
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
+
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL;
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
+ new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
+ if (IS_ERR(new_dbuf_bw_state))
+ return PTR_ERR(new_dbuf_bw_state);
+
+ old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state);
+
+ new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
+ }
+
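+ /* No pipe changed its dbuf bandwidth usage, so the global state was never grabbed */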
+ if (!old_dbuf_bw_state)
+ return 0;
+
+ if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_cdclk_update_dbuf_bw_min_cdclk(state,
+ intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state),
+ intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state),
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void intel_dbuf_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state);
+ }
+}
+
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe]));
+}
+
+static struct intel_global_state *
+intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_bw_state *state;
+
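+ /* the state contains only plain arrays, so a shallow copy is sufficient */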
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ return &state->base;
+}
+
+static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_bw_funcs = {
+ .atomic_duplicate_state = intel_dbuf_bw_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_bw_destroy_state,
+};
+
+int intel_dbuf_bw_init(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(display, &display->dbuf_bw.obj,
+ &state->base, &intel_dbuf_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.h b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
new file mode 100644
index 000000000000..61875b9d5969
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_DBUF_BW_H__
+#define __INTEL_DBUF_BW_H__
+
+#include <drm/drm_atomic.h>
+
+struct intel_atomic_state;
+struct intel_dbuf_bw_state;
+struct intel_crtc;
+struct intel_display;
+struct intel_global_state;
+
+struct intel_dbuf_bw_state *
+to_intel_dbuf_bw_state(struct intel_global_state *obj_state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state);
+
+int intel_dbuf_bw_init(struct intel_display *display);
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state);
+void intel_dbuf_bw_update_hw_state(struct intel_display *display);
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DBUF_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c09aa759f4d4..733ef4559131 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -35,7 +35,6 @@
#include <drm/drm_privacy_screen_consumer.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_alpm.h"
#include "intel_audio.h"
@@ -53,6 +52,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -72,6 +72,7 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lspcon.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
#include "intel_panel.h"
@@ -1466,10 +1467,15 @@ static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const u8 *signal_array;
+ size_t array_size;
int i;
- for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
- if (index_to_dp_signal_levels[i] == signal_levels)
+ signal_array = index_to_dp_signal_levels;
+ array_size = ARRAY_SIZE(index_to_dp_signal_levels);
+
+ for (i = 0; i < array_size; i++) {
+ if (signal_array[i] == signal_levels)
return i;
}
@@ -4240,6 +4246,19 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
&crtc_state->dpll_hw_state);
}
+static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
+
+ if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
+ crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
+ else
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -4559,6 +4578,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
+ int ret = 0;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = intel_dp_compute_config_late(encoder, crtc_state, conn_state);
+
+ if (ret)
+ return ret;
drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
@@ -5224,7 +5250,12 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->enable_clock = intel_xe3plpd_pll_enable;
+ encoder->disable_clock = intel_xe3plpd_pll_disable;
+ encoder->port_pll_type = intel_mtl_port_pll_type;
+ encoder->get_config = xe3plpd_ddi_get_config;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
@@ -5289,7 +5320,9 @@ void intel_ddi_init(struct intel_display *display,
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->set_signal_levels = intel_lt_phy_set_signal_levels;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
} else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index a238be5bc455..395dba8c9e4d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -3,13 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_lt_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1115,6 +1116,69 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
.num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
};
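+/*
+ * Entries follow the field order of struct xe3plpd_lt_phy_buf_trans:
+ * { txswing, txswing_level, pre_cursor, main_cursor, post_cursor }
+ */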
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_dp14[] = {
+ { .lt = { 1, 0, 0, 21, 0 } },
+ { .lt = { 1, 1, 0, 24, 3 } },
+ { .lt = { 1, 2, 0, 28, 7 } },
+ { .lt = { 0, 3, 0, 35, 13 } },
+ { .lt = { 1, 1, 0, 27, 0 } },
+ { .lt = { 1, 2, 0, 31, 4 } },
+ { .lt = { 0, 3, 0, 39, 9 } },
+ { .lt = { 1, 2, 0, 35, 0 } },
+ { .lt = { 0, 3, 0, 41, 7 } },
+ { .lt = { 0, 3, 0, 48, 0 } },
+};
+
+/* DP2.1 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_uhbr[] = {
+ { .lt = { 0, 0, 0, 48, 0 } },
+ { .lt = { 0, 0, 0, 43, 5 } },
+ { .lt = { 0, 0, 0, 40, 8 } },
+ { .lt = { 0, 0, 0, 37, 11 } },
+ { .lt = { 0, 0, 0, 33, 15 } },
+ { .lt = { 0, 0, 2, 46, 0 } },
+ { .lt = { 0, 0, 2, 42, 4 } },
+ { .lt = { 0, 0, 2, 38, 8 } },
+ { .lt = { 0, 0, 2, 35, 11 } },
+ { .lt = { 0, 0, 2, 33, 13 } },
+ { .lt = { 0, 0, 4, 44, 0 } },
+ { .lt = { 0, 0, 4, 40, 4 } },
+ { .lt = { 0, 0, 4, 37, 7 } },
+ { .lt = { 0, 0, 4, 33, 11 } },
+ { .lt = { 0, 0, 8, 40, 0 } },
+ { .lt = { 1, 0, 2, 26, 2 } },
+};
+
+/* eDP */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_edp[] = {
+ { .lt = { 1, 0, 0, 12, 0 } },
+ { .lt = { 1, 1, 0, 13, 1 } },
+ { .lt = { 1, 2, 0, 15, 3 } },
+ { .lt = { 1, 3, 0, 19, 7 } },
+ { .lt = { 1, 1, 0, 14, 0 } },
+ { .lt = { 1, 2, 0, 16, 2 } },
+ { .lt = { 1, 3, 0, 21, 5 } },
+ { .lt = { 1, 2, 0, 18, 0 } },
+ { .lt = { 1, 3, 0, 22, 4 } },
+ { .lt = { 1, 3, 0, 26, 0 } },
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_dp14 = {
+ .entries = _xe3plpd_lt_trans_dp14,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_dp14),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_uhbr = {
+ .entries = _xe3plpd_lt_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_uhbr),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_edp = {
+ .entries = _xe3plpd_lt_trans_edp,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_edp),
+};
+
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1707,11 +1771,26 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
+static const struct intel_ddi_buf_trans *
+xe3plpd_get_lt_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_uhbr, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&xe3plpd_lt_trans_dp14, n_entries);
+}
+
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->get_buf_trans = xe3plpd_get_lt_buf_trans;
+ } else if (DISPLAY_VER(display) >= 14) {
if (intel_encoder_is_c10phy(encoder))
encoder->get_buf_trans = mtl_get_c10_buf_trans;
else
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 29a190390192..cec332090a20 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -50,6 +50,14 @@ struct dg2_snps_phy_buf_trans {
u8 post_cursor;
};
+struct xe3plpd_lt_phy_buf_trans {
+ u8 txswing;
+ u8 txswing_level;
+ u8 pre_cursor;
+ u8 main_cursor;
+ u8 post_cursor;
+};
+
union intel_ddi_buf_trans_entry {
struct hsw_ddi_buf_trans hsw;
struct bxt_ddi_buf_trans bxt;
@@ -57,6 +65,7 @@ union intel_ddi_buf_trans_entry {
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
struct dg2_snps_phy_buf_trans snps;
+ struct xe3plpd_lt_phy_buf_trans lt;
};
struct intel_ddi_buf_trans {
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 5dca7f96b425..e6d18e2551e2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -51,7 +51,6 @@
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
@@ -60,6 +59,7 @@
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
+#include "intel_casf.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
@@ -76,6 +76,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -99,6 +100,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
@@ -129,11 +131,9 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
-#include "vlv_sideband.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -141,65 +141,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
-/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_device *drm)
-{
- int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
- /* Obtain SKU information */
- hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
- CCK_FUSE_HPLL_FREQ_MASK;
-
- return vco_freq[hpll_freq] * 1000;
-}
-
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq)
-{
- u32 val;
- int divider;
-
- val = vlv_cck_read(drm, reg);
- divider = val & CCK_FREQUENCY_VALUES;
-
- drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
-
- return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
-}
-
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg)
-{
- struct drm_i915_private *dev_priv = to_i915(drm);
- int hpll;
-
- vlv_cck_get(drm);
-
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
-
- hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
-
- vlv_cck_put(drm);
-
- return hpll;
-}
-
-void intel_update_czclk(struct intel_display *display)
-{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (!display->platform.valleyview && !display->platform.cherryview)
- return;
-
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
- CCK_CZ_CLOCK_CONTROL);
-
- drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
-}
-
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
return (crtc_state->active_planes &
@@ -891,9 +832,8 @@ static void intel_async_flip_vtd_wa(struct intel_display *display,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
+ return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
(DISPLAY_VER(display) == 9 || display->platform.broadwell ||
display->platform.haswell);
}
@@ -1040,6 +980,24 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}
+static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
+static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
#undef is_disabling
#undef is_enabling
@@ -1195,6 +1153,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (audio_disabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_disable(state, crtc);
+ if (intel_casf_disabling(old_crtc_state, new_crtc_state))
+ intel_casf_disable(new_crtc_state);
+
intel_drrs_deactivate(old_crtc_state);
if (hsw_ips_pre_update(state, crtc))
@@ -1642,8 +1603,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(display))
- intel_vrr_set_transcoder_timings(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
@@ -2422,39 +2382,44 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- int vblank_delay = 0;
+ int set_context_latency = 0;
if (!HAS_DSB(display))
return 0;
- vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
+ set_context_latency = max(set_context_latency,
+ intel_psr_min_set_context_latency(crtc_state));
- return vblank_delay;
+ return set_context_latency;
}
-static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- int vblank_delay, max_vblank_delay;
+ int set_context_latency, max_vblank_delay;
+
+ set_context_latency = intel_crtc_set_context_latency(crtc_state);
- vblank_delay = intel_crtc_vblank_delay(crtc_state);
max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
- if (vblank_delay > max_vblank_delay) {
- drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
- crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay);
+ if (set_context_latency > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name,
+ set_context_latency,
+ max_vblank_delay);
return -EINVAL;
}
- adjusted_mode->crtc_vblank_start += vblank_delay;
+ crtc_state->set_context_latency = set_context_latency;
+ adjusted_mode->crtc_vblank_start += set_context_latency;
return 0;
}
@@ -2466,11 +2431,11 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- ret = intel_crtc_compute_vblank_delay(state, crtc);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
return ret;
- ret = intel_dpll_crtc_compute_clock(state, crtc);
+ ret = intel_crtc_compute_set_context_latency(state, crtc);
if (ret)
return ret;
@@ -2487,6 +2452,8 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
if (crtc_state->has_pch_encoder)
return ilk_fdi_compute_config(crtc, crtc_state);
+ intel_vrr_compute_guardband(crtc_state);
+
return 0;
}
@@ -2678,13 +2645,16 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
if (DISPLAY_VER(display) >= 4)
@@ -2768,13 +2738,16 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
/*
@@ -2881,11 +2854,24 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder))
- adjusted_mode->crtc_vblank_start =
- adjusted_mode->crtc_vdisplay +
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
+ pipe_config->set_context_latency =
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ pipe_config->set_context_latency;
+ } else if (DISPLAY_VER(display) == 12) {
+ /*
+ * TGL doesn't have a dedicated register for SCL.
+ * Instead, the hardware derives SCL from the difference between
+ * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
+ * To reflect the HW behaviour, read out the SCL value as
+ * vblank start - vactive.
+ */
+ pipe_config->set_context_latency =
+ adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ }
if (DISPLAY_VER(display) >= 30)
pipe_config->min_hblank = intel_de_read(display,
@@ -3952,6 +3938,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_joiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
+ /* intel_vrr_get_config() depends on .framestart_delay */
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
+ /*
+ * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
+ * readout done by intel_get_transcoder_timings().
+ */
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
@@ -4003,15 +4003,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
-
- pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
- } else {
- /* no idea if this is correct */
- pipe_config->framestart_delay = 1;
- }
-
out:
intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);
@@ -4258,9 +4249,14 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
+ ret = intel_casf_compute_config(crtc_state);
+ if (ret)
+ return ret;
+
if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
- intel_crtc_needs_fastset(crtc_state)) {
+ intel_crtc_needs_fastset(crtc_state) ||
+ intel_casf_needs_scaler(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
return ret;
@@ -4639,7 +4635,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
+ crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
@@ -4760,8 +4756,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_vrr_compute_config_late(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4996,9 +4990,33 @@ static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_s
* Allow fastboot to fix up vblank delay (handled via LRR
* codepaths), a bit dodgy as the registers aren't
* double buffered but seems to be working more or less...
+ *
+ * Also allow this when the VRR timing generator is always on
+ * and the optimized guardband is used. In such cases the
+ * vblank delay may vary even without inherited state, but it's
+ * still safe as the VRR guardband remains the same.
*/
- return HAS_LRR(display) && old_crtc_state->inherited &&
- !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+ return HAS_LRR(display) &&
+ (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
+static void
+pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
+ const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ char *chipname = "LTPHY";
+
+ pipe_config_mismatch(p, fastset, crtc, name, chipname);
+
+ drm_printf(p, "expected:\n");
+ intel_lt_phy_dump_hw_state(display, a);
+ drm_printf(p, "found:\n");
+ intel_lt_phy_dump_hw_state(display, b);
}
bool
@@ -5125,6 +5143,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_PLL_LT(name) do { \
+ if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->name, \
+ &pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
@@ -5319,6 +5347,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_I(pixel_rate);
+ PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable);
+ PIPE_CONF_CHECK_I(hw.casf_params.win_size);
+ PIPE_CONF_CHECK_I(hw.casf_params.strength);
PIPE_CONF_CHECK_X(gamma_mode);
if (display->platform.cherryview)
@@ -5349,7 +5380,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
+ else if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
@@ -5443,6 +5476,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.guardband);
}
+ PIPE_CONF_CHECK_I(set_context_latency);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -5689,6 +5724,23 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.enable)
+ enabled_pipes |= BIT(crtc->pipe);
+ else
+ enabled_pipes &= ~BIT(crtc->pipe);
+ }
+
+ return enabled_pipes;
+}
+
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes)
{
@@ -5718,12 +5770,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
- const struct drm_display_mode *new_adjusted_mode)
+static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
+ const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
+ const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+
return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
- old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
+ old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
}
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
@@ -5749,8 +5805,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
- &new_crtc_state->hw.adjusted_mode))
+ if (!lrr_params_changed(old_crtc_state, new_crtc_state))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -6342,7 +6397,6 @@ int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = false;
if (!intel_display_driver_check_access(display))
return -ENODEV;
@@ -6450,14 +6504,11 @@ int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- any_ms = true;
-
intel_dpll_release(state, crtc);
}
- if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(display->drm,
- "rejecting conflicting digital port configuration\n");
+ if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) {
+ drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -6466,29 +6517,25 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state);
+
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state, any_ms);
+ ret = intel_bw_atomic_check(state);
if (ret)
goto fail;
- ret = intel_cdclk_atomic_check(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state);
if (ret)
goto fail;
- if (intel_any_crtc_needs_modeset(state))
- any_ms = true;
-
- if (any_ms) {
+ if (intel_any_crtc_needs_modeset(state)) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
-
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
}
ret = intel_pmdemand_atomic_check(state);
@@ -6739,6 +6786,11 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
intel_vrr_set_transcoder_timings(new_crtc_state);
}
+ if (intel_casf_enabling(new_crtc_state, old_crtc_state))
+ intel_casf_enable(new_crtc_state);
+ else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
+ intel_casf_update_strength(new_crtc_state);
+
intel_fbc_update(state, crtc);
drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));
@@ -7307,7 +7359,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
new_crtc_state);
intel_dsb_interrupt(new_crtc_state->dsb_commit);
@@ -7397,13 +7449,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
intel_pmdemand_pre_plane_update(state);
- if (state->modeset) {
+ if (state->modeset)
drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
- intel_set_cdclk_pre_plane_update(state);
+ intel_set_cdclk_pre_plane_update(state);
+ if (state->modeset)
intel_modeset_verify_disabled(state);
- }
intel_sagv_pre_plane_update(state);
@@ -7516,8 +7568,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -8003,6 +8054,14 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ /*
+ * WM_LINETIME only goes up to (almost) 64 usec, and also
+ * knowing that the linetime is always bounded will ease the
+ * mind during various calculations.
+ */
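+ /* htotal [pixels] * 1000 / clock [kHz] = line time in usec */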
+ if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -8327,7 +8386,5 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 37e2ab301a80..fc2ef92ccf68 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -394,6 +394,8 @@ enum phy_fia {
i)
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -435,11 +437,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_device *drm);
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
@@ -528,7 +525,6 @@ void intel_init_display_hooks(struct intel_display *display);
void intel_setup_outputs(struct intel_display *display);
int intel_initial_commit(struct intel_display *display);
void intel_panel_sanitize_ssc(struct intel_display *display);
-void intel_update_czclk(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index d56065f22655..9a47aa38cf82 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include "i915_drv.h"
-#include "intel_display_conversion.h"
+#include <drm/intel/display_member.h>
-static struct intel_display *__i915_to_display(struct drm_i915_private *i915)
-{
- return i915->display;
-}
+#include "intel_display_conversion.h"
struct intel_display *__drm_to_display(struct drm_device *drm)
{
- return __i915_to_display(to_i915(drm));
+ /*
+ * Note: This relies on both struct drm_i915_private and struct
+ * xe_device having the struct drm_device and struct intel_display *
+ * members at the same relative offsets, as defined by struct
+ * __intel_generic_device.
+ *
+ * See also INTEL_DISPLAY_MEMBER_STATIC_ASSERT().
+ */
+ struct __intel_generic_device *d = container_of(drm, struct __intel_generic_device, drm);
+
+ return d->display;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 8c226406c5cd..893279be8409 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -41,6 +41,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display_parent_interface;
struct intel_dmc;
struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
@@ -291,6 +292,9 @@ struct intel_display {
/* Intel PCH: where the south display engine lives */
enum intel_pch pch_type;
+ /* Parent, or core, driver functions exposed to display */
+ const struct intel_display_parent_interface *parent;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -370,6 +374,10 @@ struct intel_display {
} dbuf;
struct {
+ struct intel_global_obj obj;
+ } dbuf_bw;
+
+ struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -475,7 +483,21 @@ struct intel_display {
struct work_struct vblank_notify_work;
- u32 de_irq_mask[I915_MAX_PIPES];
+ /*
+ * Cached value of VLV/CHV IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 vlv_imr_mask;
+ /*
+ * Cached value of gen 5-7 DE IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 ilk_de_imr_mask;
+ /*
+ * Cached value of BDW+ DE pipe IMR to avoid reads in updating
+ * the bitfield.
+ */
+ u32 de_pipe_imr_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
@@ -568,6 +590,11 @@ struct intel_display {
} state;
struct {
+ unsigned int hpll_freq;
+ unsigned int czclk_freq;
+ } vlv_clock;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 10dddec3796f..7014331108aa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -47,6 +47,7 @@
#include "intel_psr_regs.h"
#include "intel_vdsc.h"
#include "intel_wm.h"
+#include "intel_tc.h"
static struct intel_display *node_to_intel_display(struct drm_info_node *node)
{
@@ -246,6 +247,8 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *mode;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_digital_port *dig_port = NULL;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
@@ -268,14 +271,19 @@ static void intel_connector_info(struct seq_file *m,
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
+ dig_port = dp_to_dig_port(intel_attached_dp(intel_connector));
break;
case DRM_MODE_CONNECTOR_HDMIA:
intel_hdmi_info(m, intel_connector);
+ dig_port = hdmi_to_dig_port(intel_attached_hdmi(intel_connector));
break;
default:
break;
}
+ if (dig_port && intel_encoder_is_tc(&dig_port->base))
+ intel_tc_info(&p, dig_port);
+
intel_hdcp_info(m, intel_connector);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index a002bc6ce7b0..328447a5e5e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};
+static const u16 wcl_ids[] = {
+ INTEL_WCL_IDS(ID),
+ 0
+};
+
static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(pantherlake, wildcatlake),
+ .pciidlist = wcl_ids,
+ },
+ {},
+ }
};
__diag_pop();
@@ -1482,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
@@ -1634,7 +1647,8 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent)
{
struct intel_display *display;
const struct intel_display_device_info *info;
@@ -1650,6 +1664,8 @@ struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
+ display->parent = parent;
+
intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index f329f1beafef..b559ef43d547 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -13,6 +13,7 @@
struct drm_printer;
struct intel_display;
+struct intel_display_parent_interface;
struct pci_dev;
/*
@@ -101,7 +102,9 @@ struct pci_dev;
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
- func(pantherlake)
+ func(pantherlake) \
+ func(pantherlake_wildcatlake)
+
#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
@@ -140,10 +143,13 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
+#define HAS_128B_Y_TILING(__display) (!(__display)->platform.i915g && !(__display)->platform.i915gm)
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
@@ -155,7 +161,7 @@ struct intel_display_platforms {
#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
-#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell)
#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
@@ -308,7 +314,8 @@ struct intel_display_device_info {
bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent);
void intel_display_device_remove(struct intel_display *display);
void intel_display_device_info_runtime_init(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index cf1c14412abe..eb0727b9a0f6 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -18,7 +18,7 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_utils.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -28,12 +28,14 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
@@ -285,6 +287,10 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_wq_unordered;
+ ret = intel_dbuf_bw_init(display);
+ if (ret)
+ goto cleanup_wq_unordered;
+
ret = intel_bw_init(display);
if (ret)
goto cleanup_wq_unordered;
@@ -482,7 +488,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
- intel_update_czclk(display);
intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 123e054affbe..e1a812f6159b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -140,14 +140,14 @@ void ilk_update_display_irq(struct intel_display *display,
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- new_val = dev_priv->irq_mask;
+ new_val = display->irq.ilk_de_imr_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->irq_mask &&
+ if (new_val != display->irq.ilk_de_imr_mask &&
!drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
- dev_priv->irq_mask = new_val;
- intel_de_write(display, DEIMR, dev_priv->irq_mask);
+ display->irq.ilk_de_imr_mask = new_val;
+ intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
intel_de_posting_read(display, DEIMR);
}
}
@@ -215,13 +215,13 @@ static void bdw_update_pipe_irq(struct intel_display *display,
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = display->irq.de_irq_mask[pipe];
+ new_val = display->irq.de_pipe_imr_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != display->irq.de_irq_mask[pipe]) {
- display->irq.de_irq_mask[pipe] = new_val;
- intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
+ if (new_val != display->irq.de_pipe_imr_mask[pipe]) {
+ display->irq.de_pipe_imr_mask[pipe] = new_val;
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -872,7 +872,7 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
@@ -923,7 +923,7 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
@@ -972,6 +972,53 @@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
}
}
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier)
+{
+ /* disable master interrupt before clearing iir */
+ *de_ier = intel_de_read_fw(display, DEIER);
+ intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ /*
+ * Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be able
+ * to process them after we restore SDEIER (as soon as we restore it,
+ * we'll get an interrupt if SDEIIR still has something to process due
+ * to its back queue).
+ */
+ if (!HAS_PCH_NOP(display)) {
+ *sde_ier = intel_de_read_fw(display, SDEIER);
+ intel_de_write_fw(display, SDEIER, 0);
+ } else {
+ *sde_ier = 0;
+ }
+}
+
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier)
+{
+ intel_de_write_fw(display, DEIER, de_ier);
+
+ if (sde_ier)
+ intel_de_write_fw(display, SDEIER, sde_ier);
+}
+
+bool ilk_display_irq_handler(struct intel_display *display)
+{
+ u32 de_iir;
+ bool handled = false;
+
+ de_iir = intel_de_read_fw(display, DEIIR);
+ if (de_iir) {
+ intel_de_write_fw(display, DEIIR, de_iir);
+ if (DISPLAY_VER(display) >= 7)
+ _ivb_display_irq_handler(display, de_iir);
+ else
+ _ilk_display_irq_handler(display, de_iir);
+ handled = true;
+ }
+
+ return handled;
+}
+
static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
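
The three helpers above replace the old ilk/ivb_display_irq_handler(display, de_iir) entry points: the caller is now expected to bracket DEIIR processing with the master disable/enable pair. A minimal caller sketch, assuming the usual top-half flow in the core interrupt code (function and variable names here are illustrative, not the actual caller):

#include <linux/interrupt.h>

#include "intel_display_irq.h"

static irqreturn_t example_ilk_irq_handler(struct intel_display *display)
{
	u32 de_ier, sde_ier;
	bool handled;

	/* Mask the DE master bit and south interrupts before touching IIR. */
	ilk_display_irq_master_disable(display, &de_ier, &sde_ier);

	/* Ack and dispatch whatever is pending in DEIIR. */
	handled = ilk_display_irq_handler(display);

	/* Restore DEIER/SDEIER; a queued SDEIIR event re-raises the IRQ. */
	ilk_display_irq_master_enable(display, de_ier, sde_ier);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
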
@@ -1865,8 +1912,6 @@ void vlv_display_error_irq_handler(struct intel_display *display,
static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -1881,7 +1926,7 @@ static void _vlv_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ display->irq.vlv_imr_mask = ~0u;
}
void vlv_display_irq_reset(struct intel_display *display)
@@ -1902,6 +1947,22 @@ void i9xx_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
}
+u32 i9xx_display_irq_enable_mask(struct intel_display *display)
+{
+ u32 enable_mask;
+
+ enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ if (DISPLAY_VER(display) >= 3)
+ enable_mask |= I915_ASLE_INTERRUPT;
+
+ if (HAS_HOTPLUG(display))
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+
+ return enable_mask;
+}
+
void i915_display_irq_postinstall(struct intel_display *display)
{
/*
@@ -1939,7 +2000,6 @@ static u32 vlv_error_mask(void)
static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -1973,11 +2033,11 @@ static void _vlv_display_irq_postinstall(struct intel_display *display)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);
- dev_priv->irq_mask = ~enable_mask;
+ display->irq.vlv_imr_mask = ~enable_mask;
- intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}
void vlv_display_irq_postinstall(struct intel_display *display)
@@ -1988,7 +2048,7 @@ void vlv_display_irq_postinstall(struct intel_display *display)
spin_unlock_irq(&display->irq.lock);
}
-void ibx_display_irq_reset(struct intel_display *display)
+static void ibx_display_irq_reset(struct intel_display *display)
{
if (HAS_PCH_NOP(display))
return;
@@ -1999,6 +2059,24 @@ void ibx_display_irq_reset(struct intel_display *display)
intel_de_write(display, SERR_INT, 0xffffffff);
}
+void ilk_display_irq_reset(struct intel_display *display)
+{
+ struct intel_uncore *uncore = to_intel_uncore(display->drm);
+
+ gen2_irq_reset(uncore, DE_IRQ_REGS);
+ display->irq.ilk_de_imr_mask = ~0u;
+
+ if (DISPLAY_VER(display) == 7)
+ intel_de_write(display, GEN7_ERR_INT, 0xffffffff);
+
+ if (display->platform.haswell) {
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
+ }
+
+ ibx_display_irq_reset(display);
+}
+
void gen8_display_irq_reset(struct intel_display *display)
{
enum pipe pipe;
@@ -2088,8 +2166,8 @@ void gen8_irq_power_well_post_enable(struct intel_display *display,
for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
- ~display->irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_pipe_imr_mask[pipe],
+ ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
spin_unlock_irq(&display->irq.lock);
}
@@ -2183,8 +2261,6 @@ out:
void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
u32 display_mask, extra_mask;
if (DISPLAY_VER(display) >= 7) {
@@ -2216,11 +2292,11 @@ void ilk_de_irq_postinstall(struct intel_display *display)
if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
- i915->irq_mask = ~display_mask;
+ display->irq.ilk_de_imr_mask = ~display_mask;
ibx_irq_postinstall(display);
- intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
display_mask | extra_mask);
}
@@ -2305,12 +2381,12 @@ void gen8_de_irq_postinstall(struct intel_display *display)
}
for_each_pipe(display, pipe) {
- display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
+ display->irq.de_pipe_imr_mask[pipe],
de_pipe_enables);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index c66db3851da4..84acd31948cf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -47,8 +47,9 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier);
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier);
+bool ilk_display_irq_handler(struct intel_display *display);
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
void gen11_display_irq_handler(struct intel_display *display);
@@ -56,11 +57,12 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
void i9xx_display_irq_reset(struct intel_display *display);
-void ibx_display_irq_reset(struct intel_display *display);
+void ilk_display_irq_reset(struct intel_display *display);
void vlv_display_irq_reset(struct intel_display *display);
void gen8_display_irq_reset(struct intel_display *display);
void gen11_display_irq_reset(struct intel_display *display);
+u32 i9xx_display_irq_enable_mask(struct intel_display *display);
void i915_display_irq_postinstall(struct intel_display *display);
void i965_display_irq_postinstall(struct intel_display *display);
void vlv_display_irq_postinstall(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_jiffies.h b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
new file mode 100644
index 000000000000..c060c567e262
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_JIFFIES_H__
+#define __INTEL_DISPLAY_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
+#endif /* __INTEL_DISPLAY_JIFFIES_H__ */
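
A usage sketch for the two helpers in this new header, assuming a caller along the lines of the panel power sequencing code (the names and the 500 ms delay below are illustrative): record jiffies at event A, then enforce only the remaining part of the delay just before event B.

#include <linux/jiffies.h>

#include "intel_display_jiffies.h"

struct example_panel {
	unsigned long power_off_time;	/* jiffies recorded at event A */
};

static void example_panel_power_off(struct example_panel *panel)
{
	/* event A: remember when the panel was powered down */
	panel->power_off_time = jiffies;
}

static void example_panel_power_on(struct example_panel *panel)
{
	/*
	 * event B: enforce an assumed 500 ms power-cycle delay, sleeping
	 * only for whatever part of it has not already elapsed.
	 */
	wait_remaining_ms_from_jiffies(panel->power_off_time, 500);
}
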
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index da4babfd6bcb..ebe6225470d0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -11,7 +11,6 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -23,6 +22,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 39b71fffa2cd..9b49952994ce 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1516,7 +1516,11 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc xelpdp_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1534,6 +1538,7 @@ static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
I915_DECL_PW_DOMAINS(xe2lpd_pwdoms_pica_tc,
@@ -1584,6 +1589,7 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
/*
@@ -1677,16 +1683,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
- .instances = &I915_PW_INSTANCES(
- I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
- I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
- I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
- I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
- I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
- I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
- ),
- .ops = &xelpdp_aux_power_well_ops,
},
};
@@ -1715,6 +1711,7 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xe3lpd_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
static const struct i915_power_well_desc wcl_power_wells_main[] = {
@@ -1751,7 +1748,11 @@ static const struct i915_power_well_desc wcl_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_C),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc wcl_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1768,6 +1769,7 @@ static const struct i915_power_well_desc_list wcl_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(wcl_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(wcl_power_wells_aux),
};
static void init_power_well_domains(const struct i915_power_well_instance *inst,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 5e88b930f5aa..e1d45ef0eedd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1864,18 +1864,36 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
* expected to just wait a fixed 600us after raising the request
* bit.
*/
- usleep_range(600, 1200);
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_set(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel power to be up\n",
+ phy_name(phy));
+ } else {
+ usleep_range(600, 1200);
+ }
}
static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
- usleep_range(10, 30);
+
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_clear(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel to powerdown\n",
+ phy_name(phy));
+ } else {
+ usleep_range(10, 30);
+ }
}
static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
index 56c4024201c1..0a331f89b4db 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rpm.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -1,69 +1,62 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
-#include "i915_drv.h"
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
-#include "intel_runtime_pm.h"
-
-static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return &i915->runtime_pm;
-}
struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
{
- return intel_runtime_pm_get_raw(display_to_rpm(display));
+ return display->parent->rpm->get_raw(display->drm);
}
void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+ display->parent->rpm->put_raw(display->drm, wakeref);
}
struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
{
- return intel_runtime_pm_get(display_to_rpm(display));
+ return display->parent->rpm->get(display->drm);
}
struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
{
- return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+ return display->parent->rpm->get_if_in_use(display->drm);
}
struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
{
- return intel_runtime_pm_get_noresume(display_to_rpm(display));
+ return display->parent->rpm->get_noresume(display->drm);
}
void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put(display_to_rpm(display), wakeref);
+ display->parent->rpm->put(display->drm, wakeref);
}
void intel_display_rpm_put_unchecked(struct intel_display *display)
{
- intel_runtime_pm_put_unchecked(display_to_rpm(display));
+ display->parent->rpm->put_unchecked(display->drm);
}
bool intel_display_rpm_suspended(struct intel_display *display)
{
- return intel_runtime_pm_suspended(display_to_rpm(display));
+ return display->parent->rpm->suspended(display->drm);
}
void assert_display_rpm_held(struct intel_display *display)
{
- assert_rpm_wakelock_held(display_to_rpm(display));
+ display->parent->rpm->assert_held(display->drm);
}
void intel_display_rpm_assert_block(struct intel_display *display)
{
- disable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_block(display->drm);
}
void intel_display_rpm_assert_unblock(struct intel_display *display)
{
- enable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_unblock(display->drm);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 358ab922d7a7..00600134bda0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -551,7 +551,16 @@ struct intel_connector {
u8 fec_capability;
u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_throughput_quirk:1;
u8 dsc_decompression_enabled:1;
+
+ struct {
+ struct {
+ int rgb_yuv444;
+ int yuv422_420;
+ } overall_throughput;
+ int max_line_width;
+ } dsc_branch_caps;
} dp;
struct {
@@ -946,6 +955,26 @@ struct intel_csc_matrix {
u16 postoff[3];
};
+enum intel_panel_replay_dsc_support {
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED,
+ INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY,
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE,
+};
+
+struct scaler_filter_coeff {
+ u16 sign;
+ u16 exp;
+ u16 mantissa;
+};
+
+struct intel_casf {
+ #define SCALER_FILTER_NUM_TAPS 7
+ struct scaler_filter_coeff coeff[SCALER_FILTER_NUM_TAPS];
+ u8 strength;
+ u8 win_size;
+ bool casf_enable;
+};
+
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -982,6 +1011,7 @@ struct intel_crtc_state {
struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
struct drm_display_mode mode, pipe_mode, adjusted_mode;
enum drm_scaling_filter scaling_filter;
+ struct intel_casf casf_params;
} hw;
/* actual state of LUTs */
@@ -1124,9 +1154,12 @@ struct intel_crtc_state {
bool has_panel_replay;
bool wm_level_disabled;
bool pkg_c_latency_used;
+ /* Only used for state verification. */
+ enum intel_panel_replay_dsc_support panel_replay_dsc_support;
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
+ const char *no_psr_reason;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1183,7 +1216,9 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
- int min_cdclk[I915_MAX_PLANES];
+ int min_cdclk;
+
+ int plane_min_cdclk[I915_MAX_PLANES];
/* for packed/planar CbCr */
u32 data_rate[I915_MAX_PLANES];
@@ -1268,6 +1303,8 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
+ /* Only used for state computation, not read out from the HW. */
+ bool compression_enabled_on_link;
bool compression_enable;
int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
@@ -1341,6 +1378,20 @@ struct intel_crtc_state {
/* LOBF flag */
bool has_lobf;
+
+ /* W2 window or 'set context latency' lines */
+ u16 set_context_latency;
+
+ struct {
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
+ /* LNL and beyond */
+ u8 check_entry_lines;
+ u8 aux_less_wake_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
+ } alpm_state;
};
enum intel_pipe_crc_source {
@@ -1679,16 +1730,22 @@ struct intel_psr {
bool source_panel_replay_support;
bool sink_panel_replay_support;
bool sink_panel_replay_su_support;
+ enum intel_panel_replay_dsc_support sink_panel_replay_dsc_support;
bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
u8 entry_setup_frames;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
bool link_ok;
bool pkg_c_latency_used;
u8 active_non_psr_pipes;
+
+ const char *no_psr_reason;
};
struct intel_dp {
@@ -1844,19 +1901,12 @@ struct intel_dp {
bool colorimetry_support;
struct {
- u8 io_wake_lines;
- u8 fast_wake_lines;
enum transcoder transcoder;
struct mutex lock;
- /* LNL and beyond */
- u8 check_entry_lines;
- u8 aux_less_wake_lines;
- u8 silence_period_sym_clocks;
- u8 lfps_half_cycle_num_of_syms;
bool lobf_disable_debug;
bool sink_alpm_error;
- } alpm_parameters;
+ } alpm;
u8 alpm_dpcd;
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.c b/drivers/gpu/drm/i915/display/intel_display_utils.c
new file mode 100644
index 000000000000..04d010f7c23e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/device.h>
+
+#include <drm/drm_device.h>
+
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
+#include "intel_display_core.h"
+#include "intel_display_utils.h"
+
+bool intel_display_run_as_guest(struct intel_display *display)
+{
+#if IS_ENABLED(CONFIG_X86)
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+#else
+ /* Not supported yet */
+ return false;
+#endif
+}
+
+bool intel_display_vtd_active(struct intel_display *display)
+{
+ if (device_iommu_mapped(display->drm->dev))
+ return true;
+
+ /* Running as a guest, we assume the host is enforcing VT-d */
+ return intel_display_run_as_guest(display);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.h b/drivers/gpu/drm/i915/display/intel_display_utils.h
new file mode 100644
index 000000000000..2a18f160320c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_UTILS__
+#define __INTEL_DISPLAY_UTILS__
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+struct intel_display;
+
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+#ifndef fetch_and_zero
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
+#endif
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+bool intel_display_run_as_guest(struct intel_display *display);
+bool intel_display_vtd_active(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_UTILS__ */
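
A short illustration of the helpers this header takes over from i915_utils.h (the function names and values below are made up for the example):

#include <linux/slab.h>

#include "intel_display_utils.h"

static int example_mhz_to_khz(int mhz)
{
	/* MHz(1) is 1000000 and KHz(1) is 1000, so this returns mhz * 1000. */
	return MHz(mhz) / KHz(1);
}

static void example_release(void **slot)
{
	/* fetch_and_zero() returns the old *slot and clears it in one go. */
	kfree(fetch_and_zero(slot));
}

static void example_check(int mode)
{
	switch (mode) {
	case 0:
		break;
	default:
		/* Warn once about an enumerator nobody handled. */
		MISSING_CASE(mode);
		break;
	}
}
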
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index 31cd2c9cd488..c528aaa679ca 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -67,6 +67,8 @@ bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa,
return intel_display_needs_wa_16025573575(display);
case INTEL_DISPLAY_WA_14011503117:
return DISPLAY_VER(display) == 13;
+ case INTEL_DISPLAY_WA_22014263786:
+ return IS_DISPLAY_VERx100(display, 1100, 1400);
default:
drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
break;
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index abc1df83f066..3644e8e2b724 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -25,6 +25,7 @@ enum intel_display_wa {
INTEL_DISPLAY_WA_16023588340,
INTEL_DISPLAY_WA_16025573575,
INTEL_DISPLAY_WA_14011503117,
+ INTEL_DISPLAY_WA_22014263786,
};
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 77a0199f9ea5..0bddb20a7c86 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -30,13 +30,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_flipq.h"
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
{
const char *fw_path = NULL;
u32 max_fw_size = 0;
-
- if (DISPLAY_VERx100(display) == 3002 ||
- DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002) {
+ fw_path = XE3LPD_3002_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -509,10 +513,16 @@ static u32 pipedmc_interrupt_mask(struct intel_display *display)
PIPEDMC_ATS_FAULT;
}
-static u32 dmc_evt_ctl_disable(void)
+static u32 dmc_evt_ctl_disable(u32 dmc_evt_ctl)
{
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ /*
+ * DMC_EVT_CTL_ENABLE cannot be cleared once set. Always
+ * configure it based on the original event definition to
+ * avoid mismatches in assert_dmc_loaded().
+ */
+ return (dmc_evt_ctl & DMC_EVT_CTL_ENABLE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVENT_FALSE);
}
@@ -546,6 +556,51 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
+static bool fixup_dmc_evt(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg_ctl, u32 *data_ctl,
+ i915_reg_t reg_htp, u32 *data_htp)
+{
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+ return false;
+
+ if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+ return false;
+
+ /* make sure reg_ctl and reg_htp are for the same event */
+ if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+ i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+ return false;
+
+ /*
+ * On ADL-S the HRR event handler is not restored after DC6.
+ * Clear it to zero from the beginning to avoid mismatches later.
+ */
+ if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl = 0;
+ *data_htp = 0;
+ return true;
+ }
+
+ /*
+ * TGL/ADL-S DMC firmware incorrectly uses the undelayed vblank
+ * event for the HRR handler, when it should be using the delayed
+ * vblank event instead. Fixed firmware was never released
+ * so the Windows driver just hacks around it by overriding
+ * the event ID. Do the same.
+ */
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl &= ~DMC_EVT_CTL_EVENT_ID_MASK;
+ *data_ctl |= REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ MAINDMC_EVENT_VBLANK_DELAYED_A);
+ return true;
+ }
+
+ return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -564,7 +619,7 @@ static bool disable_dmc_evt(struct intel_display *display,
/* also disable the HRR event on the main DMC on TGL/ADLS */
if ((display->platform.tigerlake || display->platform.alderlake_s) &&
- is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_DELAYED_A, reg, data))
return true;
return false;
@@ -577,7 +632,7 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return dmc_evt_ctl_disable();
+ return dmc_evt_ctl_disable(dmc->dmc_info[dmc_id].mmiodata[i]);
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
@@ -636,12 +691,6 @@ static void assert_dmc_loaded(struct intel_display *display,
found = intel_de_read(display, reg);
expected = dmc_mmiodata(display, dmc, dmc_id, i);
- /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
- if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
- found &= ~DMC_EVT_CTL_ENABLE;
- expected &= ~DMC_EVT_CTL_ENABLE;
- }
-
drm_WARN(display->drm, found != expected,
"DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
@@ -794,7 +843,7 @@ static void dmc_configure_event(struct intel_display *display,
if (!is_event_handler(display, dmc_id, event_id, reg, data))
continue;
- intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable(data));
num_handlers++;
}
@@ -1064,9 +1113,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+ }
+
+ for (i = 0; i < mmio_count - 1; i++) {
+ u32 orig_mmiodata[2] = {
+ dmc_info->mmiodata[i],
+ dmc_info->mmiodata[i+1],
+ };
+ if (!fixup_dmc_evt(display, dmc_id,
+ dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+ dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+ continue;
+
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+ orig_mmiodata[0], dmc_info->mmiodata[i]);
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+ i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+ orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+ }
+
+ for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
- i, mmioaddr[i], mmiodata[i],
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
@@ -1141,7 +1213,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
}
num_entries = package_header->num_entries;
- if (WARN_ON(package_header->num_entries > max_entries))
+ if (WARN_ON(num_entries > max_entries))
num_entries = max_entries;
fw_info = (const struct intel_fw_info *)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2eab591a8ef5..0ec82fcbcf48 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -51,7 +51,6 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -64,6 +63,8 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_jiffies.h"
+#include "intel_display_utils.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -93,14 +94,10 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-/* DP DSC throughput values used for slice count calculations KPixels/s */
-#define DP_DSC_PEAK_PIXEL_RATE 2720000
-#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
-#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-
/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
@@ -1018,13 +1015,43 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
+ int tp_rgb_yuv444;
+ int tp_yuv422_420;
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on this
+ * assumption.
+ */
+ if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420))
+ return 0;
+
+ if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width)
+ return 0;
+
+ /*
+ * TODO: Pass the total pixel rate of all the streams transferred to
+ * an MST tiled display, calculate the total slice count for all tiles
+ * from this and the per-tile slice count from the total slice count.
+ */
+ tp_rgb_yuv444 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, true);
+ tp_yuv422_420 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, false);
+
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * For now use the smaller of these, which is ok, potentially
+ * resulting in a higher than required minimum slice count.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on this
+ * assumption.
+ */
+ min_slice_count = DIV_ROUND_UP(mode_clock, min(tp_rgb_yuv444, tp_yuv422_420));
/*
* Due to some DSC engine BW limitations, we need to enable second
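
A worked example of the new per-slice throughput math (the numbers are assumed, not taken from any particular sink): if drm_dp_dsc_sink_max_slice_throughput() reports 340000 KPixels/s for both the RGB/YUV444 and YUV422/420 cases, a 4K60 mode clock of 533250 kHz gives min_slice_count = DIV_ROUND_UP(533250, 340000) = 2, which the rest of intel_dp_dsc_get_slice_count() then rounds up to a slice count the source and sink actually support. Mode clocks or line widths above the branch device's overall limits now bail out early, returning a slice count of 0.
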
@@ -2340,24 +2367,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
-static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+/*
+ * Return whether FEC must be enabled for 8b10b SST or MST links. On 128b132b
+ * links FEC is always enabled implicitly by the HW, so this function returns
+ * false for that case.
+ */
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc)
{
- if (crtc_state->fec_enable)
- return;
+ if (intel_dp_is_uhbr(crtc_state))
+ return false;
/*
* Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
* Since FEC is a bandwidth overhead, continue to not enable it for
* eDP until there is a good reason to do so.
*/
- if (intel_dp_is_edp(intel_dp))
- return;
-
- if (intel_dp_is_uhbr(crtc_state))
- return;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return false;
- crtc_state->fec_enable = true;
+ return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
@@ -2375,7 +2404,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
int ret;
- intel_dp_fec_compute_config(intel_dp, pipe_config);
+ /*
+ * FIXME: set the FEC enabled state once pipe_config->port_clock is
+ * already known, so the UHBR/non-UHBR mode can be determined.
+ */
+ pipe_config->fec_enable = intel_dp_needs_8b10b_fec(pipe_config, true);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2450,7 +2483,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return ret;
}
- pipe_config->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(pipe_config);
+
drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
@@ -2460,6 +2494,40 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+static int
+dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_throughput_quirk)
+ return INT_MAX;
+
+ /*
+ * Synaptics Panamera branch devices have a problem decompressing a
+ * stream with a compressed link-bpp higher than 12, if the pixel
+ * clock is higher than ~50 % of the maximum overall throughput
+ * reported by the branch device. Work around this by limiting the
+ * maximum link bpp for such pixel clocks.
+ *
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output, after determining the pixel clock limit for
+ * YUV modes. For now use the smaller of the throughput values, which
+ * may start limiting the link-bpp value at a lower mode clock than
+ * strictly required for native YUV422/420 output formats.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on this
+ * assumption.
+ */
+ if (adjusted_mode->crtc_clock <
+ min(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420) / 2)
+ return INT_MAX;
+
+ return fxp_q4_from_int(12);
+}
+
/*
* Calculate the output link min, max bpp values in limits based on the pipe bpp
* range, crtc_state and dsc mode. Return true on success.
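
To make the quirk threshold concrete (figures assumed for illustration): if a Synaptics Panamera branch device reports an overall RGB/YUV444 throughput of 600000 KPixels/s, any mode whose crtc_clock is at least 300000 kHz gets its link bpp capped at 12, i.e. fxp_q4_from_int(12) = 192 in the x16 fixed-point units used by the limits; slower modes fall through to the INT_MAX return and keep the normal DSC bpp limits untouched.
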
@@ -2491,6 +2559,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
} else {
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int throughput_max_bpp_x16;
dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
@@ -2505,6 +2574,19 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
+
+ throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector, crtc_state);
+ throughput_max_bpp_x16 = clamp(throughput_max_bpp_x16,
+ limits->link.min_bpp_x16, max_link_bpp_x16);
+ if (throughput_max_bpp_x16 < max_link_bpp_x16) {
+ max_link_bpp_x16 = throughput_max_bpp_x16;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Decreasing link max bpp to " FXP_Q4_FMT " due to DSC throughput quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ FXP_Q4_ARGS(max_link_bpp_x16));
+ }
}
limits->link.max_bpp_x16 = max_link_bpp_x16;
@@ -4169,7 +4251,36 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
dsc_dpcd);
}
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
+static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
+{
+ u8 branch_caps[DP_DSC_BRANCH_CAP_SIZE];
+ int line_width;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX;
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX;
+ connector->dp.dsc_branch_caps.max_line_width = INT_MAX;
+
+ if (!is_branch)
+ return;
+
+ if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps,
+ sizeof(branch_caps)) != 0)
+ return;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, true) ? : INT_MAX;
+
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, false) ? : INT_MAX;
+
+ line_width = drm_dp_dsc_branch_max_line_width(branch_caps);
+ connector->dp.dsc_branch_caps.max_line_width = line_width > 0 ? line_width : INT_MAX;
+}
+
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -4182,6 +4293,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
/* Clear fec_capable to avoid using stale values */
connector->dp.fec_capability = 0;
+ memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps));
+ connector->dp.dsc_throughput_quirk = false;
+
if (dpcd_rev < DP_DPCD_REV_14)
return;
@@ -4196,6 +4310,19 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
+
+ if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return;
+
+ init_dsc_overall_throughput_limits(connector, is_branch);
+
+ /*
+ * TODO: Move the HW rev check as well to the DRM core quirk table if
+ * that's required after clarifying the list of affected devices.
+ */
+ if (drm_dp_has_quirk(desc, DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) &&
+ desc->ident.hw_rev == 0x10)
+ connector->dp.dsc_throughput_quirk = true;
}
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
@@ -4204,6 +4331,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
return;
intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+
+ if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
+ init_dsc_overall_throughput_limits(connector, false);
}
static void
@@ -4220,6 +4350,7 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
connector);
else
intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
+ &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd),
connector);
}
@@ -5553,7 +5684,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
if (intel_alpm_get_error(intel_dp)) {
intel_alpm_disable(intel_dp);
- intel_dp->alpm_parameters.sink_alpm_error = true;
+ intel_dp->alpm.sink_alpm_error = true;
}
if (intel_dp_test_short_pulse(intel_dp))
@@ -5921,6 +6052,8 @@ intel_dp_detect(struct drm_connector *_connector,
memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
+ intel_dp->psr.sink_panel_replay_dsc_support =
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
intel_dp_mst_disconnect(intel_dp);
@@ -6857,3 +6990,81 @@ void intel_dp_mst_resume(struct intel_display *display)
}
}
}
+
+static
+int intel_dp_sdp_compute_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int guardband = intel_crtc_vblank_length(crtc_state);
+ int min_sdp_guardband = intel_dp_sdp_min_guardband(crtc_state, false);
+
+ if (guardband < min_sdp_guardband) {
+ drm_dbg_kms(display->drm, "guardband %d < min sdp guardband %d\n",
+ guardband, min_sdp_guardband);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ intel_psr_compute_config_late(intel_dp, crtc_state);
+
+ ret = intel_dp_sdp_compute_config_late(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static
+int intel_dp_get_lines_for_sdp(const struct intel_crtc_state *crtc_state, u32 type)
+{
+ switch (type) {
+ case DP_SDP_VSC_EXT_VESA:
+ case DP_SDP_VSC_EXT_CEA:
+ return 10;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return 8;
+ case DP_SDP_PPS:
+ return 7;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return crtc_state->vrr.vsync_start + 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int sdp_guardband = 0;
+
+ if (assume_all_enabled ||
+ crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA));
+
+ if (assume_all_enabled ||
+ crtc_state->dsc.compression_enable)
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_PPS));
+
+ if ((assume_all_enabled && HAS_AS_SDP(display)) ||
+ crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_ADAPTIVE_SYNC));
+
+ return sdp_guardband;
+}
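
A worked example of the guardband bookkeeping above (values assumed): for a DSC-enabled VRR configuration with the Adaptive-Sync SDP enabled and crtc_state->vrr.vsync_start = 20, the per-SDP requirements are 7 lines for the PPS SDP and 20 + 1 = 21 lines for the AS SDP, so intel_dp_sdp_min_guardband() returns 21; intel_dp_sdp_compute_config_late() then fails the state with -EINVAL if intel_crtc_vblank_length() reports a guardband shorter than that.
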
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index f90cfd1dbbd0..200a8b267f64 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,6 +12,7 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_desc;
struct drm_dp_vsc_sdp;
struct drm_encoder;
struct drm_modeset_acquire_ctx;
@@ -72,6 +73,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc);
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -199,7 +202,9 @@ bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
bool dsc,
struct link_config_limits *limits);
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
@@ -215,5 +220,10 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state);
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 829a7c0fbe4f..2e7dbaf511b9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,9 +5,9 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 27f3716bdc1f..aad5fe14962f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -27,9 +27,10 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_jiffies.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_encoder.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 352f7ef29c28..4c0b943fe86f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -33,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -43,6 +42,7 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
@@ -293,12 +293,22 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_stream_update_slots(crtc_state, mst_state);
}
- if (dsc) {
- if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
- return -EINVAL;
-
- crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
- }
+ /*
+ * NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC
+ * after it was set by intel_dp_dsc_compute_config() ->
+ * intel_dp_needs_8b10b_fec().
+ */
+ crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc);
+ /*
+ * If FEC gets enabled only because of another compressed stream, FEC
+ * may not be supported for this uncompressed stream on the whole link
+ * path until the sink DPRX. In this case a downstream branch device
+ * will disable FEC for the uncompressed stream as expected and so the
+ * FEC support doesn't need to be checked for this uncompressed stream.
+ */
+ if (crtc_state->fec_enable && dsc &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
@@ -811,14 +821,14 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
return mask;
}
-static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
- u8 fec_pipe_mask = 0;
+ u8 dsc_pipe_mask = 0;
int ret;
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
@@ -831,16 +841,16 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
- if (crtc_state->fec_enable)
- fec_pipe_mask |= BIT(crtc->pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ dsc_pipe_mask |= BIT(crtc->pipe);
}
- if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask)
return 0;
- limits->force_fec_pipes |= mst_pipe_mask;
+ limits->link_dsc_pipes |= mst_pipe_mask;
- ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ ret = intel_modeset_pipes_in_mask_early(state, "MST DSC",
mst_pipe_mask);
return ret ? : -EAGAIN;
@@ -894,7 +904,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
int i;
for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
- ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ ret = intel_dp_mst_check_dsc_change(state, mgr, limits);
if (ret)
return ret;
@@ -1658,6 +1668,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
+ struct drm_dp_desc desc;
if (!connector->dp.dsc_decompression_aux)
return;
@@ -1665,7 +1676,13 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
return;
- intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc,
+ drm_dp_is_branch(dpcd_caps)) < 0)
+ return;
+
+ intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV],
+ &desc, drm_dp_is_branch(dpcd_caps),
+ connector);
}
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
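An aside on the FEC handling reworked above: on 8b/10b links FEC has to be enabled explicitly whenever any stream on the link is compressed, while 128b/132b (UHBR) links carry FEC as part of the channel coding, which is why the removed code cleared fec_enable for UHBR and the new intel_dp_needs_8b10b_fec() call keeps doing so. A minimal standalone sketch of that decision; is_uhbr and dsc_on_link are hypothetical stand-ins for the driver's real crtc state, not its API:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative model only: whether 8b/10b FEC needs enabling for a link.
 * 128b/132b (UHBR) channel coding already includes FEC, so nothing to enable.
 */
static bool needs_8b10b_fec(bool is_uhbr, bool dsc_on_link)
{
	if (is_uhbr)
		return false;

	/* 8b/10b links need FEC whenever any stream on the link uses DSC */
	return dsc_on_link;
}

int main(void)
{
	printf("HBR3 + DSC   -> FEC %s\n", needs_8b10b_fec(false, true) ? "on" : "off");
	printf("UHBR10 + DSC -> FEC %s\n", needs_8b10b_fec(true, true) ? "on" : "off");
	return 0;
}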
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 3f77ad92c156..5df6347a420d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -24,13 +24,13 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "vlv_dpio_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index f969c5399a51..2e1f67be8eda 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -17,6 +17,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -1232,6 +1233,28 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ /* TODO: Do the readback via intel_compute_shared_dplls() */
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -1691,6 +1714,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
+ .crtc_compute_clock = xe3plpd_crtc_compute_clock,
+};
+
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
@@ -1789,7 +1816,9 @@ int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
void
intel_dpll_init_clock_hook(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ display->funcs.dpll = &xe3plpd_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 14)
display->funcs.dpll = &mtl_dpll_funcs;
else if (display->platform.dg2)
display->funcs.dpll = &dg2_dpll_funcs;
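For context on the hook selection above: clock computation is dispatched through a per-platform function table, and the new HAS_LT_PHY() check simply takes priority over the display-version checks. A rough standalone sketch of that selection pattern; the struct and inputs here are illustrative placeholders, not the driver's real types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-platform dpll function table */
struct dpll_funcs {
	const char *name;
};

static const struct dpll_funcs xe3plpd_funcs = { "xe3plpd" };
static const struct dpll_funcs mtl_funcs = { "mtl" };

/* has_lt_phy and display_ver model HAS_LT_PHY() / DISPLAY_VER() */
static const struct dpll_funcs *pick_dpll_funcs(bool has_lt_phy, int display_ver)
{
	if (has_lt_phy)
		return &xe3plpd_funcs;	/* LT PHY platforms win first */
	if (display_ver >= 14)
		return &mtl_funcs;
	return NULL;			/* older platforms elided in this sketch */
}

int main(void)
{
	printf("%s\n", pick_dpll_funcs(true, 35)->name);
	return 0;
}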
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 8ea96cc524a1..92c433f7b7e2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,11 +27,11 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index f131bdd1c975..6183da90b28d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -267,6 +267,16 @@ struct intel_cx0pll_state {
bool tbt_mode;
};
+struct intel_lt_phy_pll_state {
+ u32 clock; /* in kHz */
+ u8 addr_msb[13];
+ u8 addr_lsb[13];
+ u8 data[13][4];
+ u8 config[3];
+ bool ssc_enabled;
+ bool tbt_mode;
+};
+
struct intel_dpll_hw_state {
union {
struct i9xx_dpll_hw_state i9xx;
@@ -276,6 +286,7 @@ struct intel_dpll_hw_state {
struct icl_dpll_hw_state icl;
struct intel_mpllb_state mpllb;
struct intel_cx0pll_state cx0pll;
+ struct intel_lt_phy_pll_state ltpll;
};
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index dee44d45b668..4ad4efbf9253 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -115,24 +115,6 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}
-static int dsb_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- intel_pre_commit_crtc_state(state, crtc);
-
- if (pre_commit_is_vrr_active(state, crtc))
- /*
- * When the push is sent during vblank it will trigger
- * on the next scanline, hence we have up to one extra
- * scanline until the delayed vblank occurs after
- * TRANS_PUSH has been written.
- */
- return intel_vrr_vblank_delay(crtc_state) + 1;
- else
- return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
-}
-
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -723,7 +705,7 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0);
if (pre_commit_is_vrr_active(state, crtc)) {
- int vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ int vblank_delay = crtc_state->set_context_latency;
end = intel_vrr_vmin_vblank_start(crtc_state);
start = end - vblank_delay - latency;
@@ -815,16 +797,43 @@ void intel_dsb_chain(struct intel_atomic_state *state,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb)
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
- dsb_vblank_delay(state, crtc));
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int wait_scanlines;
+
+ if (pre_commit_is_vrr_active(state, crtc)) {
+ /*
+ * If the push happened before the vmin decision boundary
+ * we don't know how far we are from the undelayed vblank.
+ * Wait until we're past the vmin safe window, at which
+ * point we're SCL lines away from the delayed vblank.
+ *
+ * If the push happened after the vmin decision boundary
+ * the hardware itself guarantees that we're SCL lines
+ * away from the delayed vblank, and we won't be inside
+ * the vmin safe window so this extra wait does nothing.
+ */
+ intel_dsb_wait_scanline_out(state, dsb,
+ intel_vrr_safe_window_start(crtc_state),
+ intel_vrr_vmin_safe_window_end(crtc_state));
+ /*
+ * When the push is sent during vblank it will trigger
+ * on the next scanline, hence we have up to one extra
+ * scanline until the delayed vblank occurs after
+ * TRANS_PUSH has been written.
+ */
+ wait_scanlines = crtc_state->set_context_latency + 1;
+ } else {
+ wait_scanlines = intel_mode_vblank_delay(adjusted_mode);
+ }
- intel_dsb_wait_usec(dsb, usecs);
+ intel_dsb_wait_usec(dsb, intel_scanlines_to_usecs(adjusted_mode, wait_scanlines));
}
/**
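To make the unit conversion in the hunk above concrete: one scanline lasts htotal pixel clocks, so waiting N scanlines on a mode whose pixel clock is given in kHz is roughly N * htotal * 1000 / clock microseconds. The driver's intel_scanlines_to_usecs() helper performs this conversion; the exact rounding below is an assumption for illustration only:

#include <stdio.h>

/*
 * Approximate model of scanline -> microsecond conversion.
 * Rounds up so a DSB-style wait never comes up short.
 */
static unsigned int scanlines_to_usecs(unsigned int scanlines,
				       unsigned int htotal,
				       unsigned int clock_khz)
{
	unsigned long long num = (unsigned long long)scanlines * htotal * 1000;

	return (unsigned int)((num + clock_khz - 1) / clock_khz);
}

int main(void)
{
	/* e.g. SCL of 3 plus the one extra scanline, on a 4400x... mode at 594 MHz */
	printf("%u us\n", scanlines_to_usecs(4, 4400, 594000));
	return 0;
}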
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index c8f4499916eb..2f31f2c1d0c5 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -48,8 +48,8 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb);
void intel_dsb_interrupt(struct intel_dsb *dsb);
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count);
void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count);
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb);
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb);
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
struct intel_dsb *dsb,
int lower, int upper);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 23402408e172..31edf57a296f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -38,10 +38,10 @@
#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_gmbus_regs.h"
@@ -106,8 +106,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u8 type, flags, seq_port;
u16 len;
enum port port;
-
- drm_dbg_kms(display->drm, "\n");
+ ssize_t ret;
+ bool hs_mode;
flags = *data++;
type = *data++;
@@ -129,45 +129,53 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
goto out;
}
- if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ hs_mode = (flags >> MIPI_TRANSFER_MODE_SHIFT) & 1;
+ if (hs_mode)
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
else
dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+ drm_dbg_kms(display->drm, "DSI packet: Port %c (seq %u), Flags 0x%02x, VC %u, %s, Type 0x%02x, Length %u, Data %*ph\n",
+ port_name(port), seq_port, flags, dsi_device->channel,
+ hs_mode ? "HS" : "LP", type, len, (int)len, data);
+
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- mipi_dsi_generic_write(dsi_device, NULL, 0);
+ ret = mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 1);
+ ret = mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 2);
+ ret = mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- mipi_dsi_generic_write(dsi_device, data, len);
+ ret = mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_DCS_LONG_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
+ if (ret < 0)
+ drm_err(display->drm, "DSI send packet failed with %pe\n", ERR_PTR(ret));
+
if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
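The restructuring above follows a single pattern: every branch of the switch records its return value, and one error report happens after the switch (in the driver via drm_err() with the %pe/ERR_PTR() kernel formatting). A small userspace analogue of the same flow; the send helpers are hypothetical stand-ins and strerror() replaces %pe here:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the individual MIPI write helpers */
static long send_short_write(void) { return 0; }
static long send_read_request(void) { return -EOPNOTSUPP; } /* reads not implemented */

static void exec_send_packet(int type)
{
	long ret;

	/* every case records its status instead of silently dropping it */
	switch (type) {
	case 0:
		ret = send_short_write();
		break;
	case 1:
		ret = send_read_request();
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* one report covers all failure paths */
	if (ret < 0)
		fprintf(stderr, "DSI send packet failed with %s\n", strerror(-ret));
}

int main(void)
{
	exec_send_packet(1);
	return 0;
}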
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 08b48e36aca6..c2663d6e2c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,12 +34,12 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_dvo_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b817ff44c043..705fcd42d358 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -10,12 +10,11 @@
#include <drm/drm_gem.h>
#include <drm/drm_modeset_helper.h>
-#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
@@ -547,8 +546,6 @@ static bool plane_has_modifier(struct intel_display *display,
u8 plane_caps,
const struct intel_modifier_desc *md)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!IS_DISPLAY_VER(display, md->display_ver.from, md->display_ver.until))
return false;
@@ -560,15 +557,15 @@ static bool plane_has_modifier(struct intel_display *display,
* where supported.
*/
if (intel_fb_is_ccs_modifier(md->modifier) &&
- HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes)
+ HAS_AUX_CCS(display) != !!md->ccs.packed_aux_planes)
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
+ (DISPLAY_VER(display) < 14 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
+ (DISPLAY_VER(display) < 20 || display->platform.dgfx))
return false;
return true;
@@ -777,7 +774,6 @@ unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct intel_display *display = to_intel_display(fb->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
@@ -814,7 +810,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(display) == 2 || HAS_128_BYTE_Y_TILING(i915))
+ if (HAS_128B_Y_TILING(display))
return 128;
else
return 512;
@@ -2117,6 +2113,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,18 +2212,24 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
- ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
+ ret = intel_fb_bo_framebuffer_init(obj, mode_cmd);
if (ret)
goto err_frontbuffer_put;
@@ -2323,6 +2326,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
@@ -2349,20 +2355,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
-
- intel_fb->panic = panic;
-
return intel_fb;
}
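The reshuffled error handling above is the usual goto-unwind ordering: because the panic object is now allocated first in intel_framebuffer_init(), every later failure has to fall through a label that frees it. A minimal standalone illustration of that ordering, with plain allocations standing in for the driver's objects:

#include <stdio.h>
#include <stdlib.h>

static int init_framebuffer(int fail_at)
{
	void *panic, *frontbuffer = NULL;
	int ret;

	panic = malloc(16);			/* allocated first ... */
	if (!panic)
		return -1;

	if (fail_at == 1) {			/* e.g. frontbuffer lookup fails */
		ret = -1;
		goto err_free_panic;		/* ... so every later error frees it */
	}
	frontbuffer = malloc(16);

	if (fail_at == 2) {			/* e.g. bo framebuffer init fails */
		ret = -1;
		goto err_frontbuffer_put;
	}

	/* success: in the driver these stay alive; freed here only to keep the sketch leak-free */
	free(frontbuffer);
	free(panic);
	return 0;

err_frontbuffer_put:
	free(frontbuffer);
err_free_panic:
	free(panic);
	return ret;
}

int main(void)
{
	printf("%d\n", init_framebuffer(2));
	return 0;
}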
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index b0e8b89f7ce8..7336d7294a7b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -18,8 +18,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
/* Nothing to do for i915 */
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *_obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
index eefcb05a99f0..d775773c6c03 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -14,8 +14,7 @@ struct drm_mode_fb_cmd2;
void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_gem_object *
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 0d380c825791..54b23116ea3c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -49,17 +49,16 @@
#include "gt/intel_gt_types.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
@@ -102,7 +101,8 @@ struct intel_fbc {
struct mutex lock;
unsigned int busy_bits;
- struct i915_stolen_fb compressed_fb, compressed_llb;
+ struct intel_stolen_node *compressed_fb;
+ struct intel_stolen_node *compressed_llb;
enum intel_fbc_id id;
@@ -141,15 +141,18 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
return stride;
}
-static unsigned int intel_fbc_cfb_cpp(void)
+static unsigned int intel_fbc_cfb_cpp(const struct intel_plane_state *plane_state)
{
- return 4; /* FBC always 4 bytes per pixel */
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ return max(cpp, 4);
}
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return intel_fbc_plane_stride(plane_state) * cpp;
}
@@ -203,7 +206,7 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return _intel_fbc_cfb_stride(display, cpp, width, stride);
}
@@ -376,20 +379,19 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
drm_WARN_ON(display->drm,
- range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_fb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
- range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_llb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
+ i915_gem_stolen_node_address(fbc->compressed_fb));
intel_de_write(display, FBC_LL_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
+ i915_gem_stolen_node_address(fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -497,7 +499,7 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, DPFC_CB_BASE,
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -566,7 +568,7 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -797,7 +799,6 @@ static u64 intel_fbc_cfb_base_max(struct intel_display *display)
static u64 intel_fbc_stolen_end(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -806,7 +807,7 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* underruns, even if that range is not reserved by the BIOS. */
if (display->platform.broadwell ||
(DISPLAY_VER(display) == 9 && !display->platform.broxton))
- end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
+ end = i915_gem_stolen_area_size(display->drm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -835,20 +836,19 @@ static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
@@ -861,17 +861,15 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_fb));
+ i915_gem_stolen_node_allocated(fbc->compressed_fb));
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_llb));
+ i915_gem_stolen_node_allocated(fbc->compressed_llb));
if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
- ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
- 4096, 4096);
+ ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096);
if (ret)
goto err;
}
@@ -887,14 +885,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
- i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
+ i915_gem_stolen_node_size(fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
err:
- if (i915_gem_stolen_initialized(i915))
+ if (i915_gem_stolen_initialized(display->drm))
drm_info_once(display->drm,
"not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
@@ -932,9 +930,12 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (IS_DISPLAY_VER(display, 11, 12))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
+ /*
+ * Wa_22014263786
+ * Fixes: Screen flicker with FBC and Package C state enabled
+ * Workaround: Forced SLB invalidation before start of new frame.
+ */
+ if (intel_display_wa(display, 22014263786))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -945,16 +946,13 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
- if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_fb))
+ i915_gem_stolen_remove_node(fbc->compressed_fb);
}
void intel_fbc_cleanup(struct intel_display *display)
@@ -967,6 +965,9 @@ void intel_fbc_cleanup(struct intel_display *display)
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+
kfree(fbc);
}
}
@@ -1083,11 +1084,57 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
}
}
+static bool
+xe3p_lpd_fbc_fp16_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xe3p_lpd_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (lnl_fbc_pixel_format_is_valid(plane_state))
+ return true;
+
+ if (xe3p_lpd_fbc_fp16_format_is_valid(plane_state))
+ return true;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ARGB16161616:
+ case DRM_FORMAT_ABGR16161616:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ return DISPLAY_VER(display) >= 35 &&
+ xe3p_lpd_fbc_fp16_format_is_valid(plane_state);
+}
+
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- if (DISPLAY_VER(display) >= 20)
+ if (DISPLAY_VER(display) >= 35)
+ return xe3p_lpd_fbc_pixel_format_is_valid(plane_state);
+ else if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_pixel_format_is_valid(plane_state);
@@ -1355,7 +1402,7 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
intel_fbc_cfb_size(plane_state) <= fbc->limit *
- i915_gem_stolen_node_size(&fbc->compressed_fb);
+ i915_gem_stolen_node_size(fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1421,6 +1468,18 @@ intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
}
}
+static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if (display->platform.haswell || display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no FBC specific limits to worry about */
+ return 0;
+}
+
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1436,7 +1495,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
- if (!i915_gem_stolen_initialized(i915)) {
+ if (!i915_gem_stolen_initialized(display->drm)) {
plane_state->no_fbc_reason = "stolen memory not initialised";
return 0;
}
@@ -1462,7 +1521,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) {
+ if (intel_display_vtd_active(display) &&
+ (display->platform.skylake || display->platform.broxton)) {
plane_state->no_fbc_reason = "VT-d enabled";
return 0;
}
@@ -1560,18 +1620,9 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- /* WaFbcExceedCdClockThreshold:hsw,bdw */
- if (display->platform.haswell || display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
- plane_state->no_fbc_reason = "pixel rate too high";
- return 0;
- }
+ if (_intel_fbc_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) {
+ plane_state->no_fbc_reason = "pixel rate too high";
+ return 0;
}
plane_state->no_fbc_reason = NULL;
@@ -1579,6 +1630,27 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ int min_cdclk;
+
+ if (!plane->fbc)
+ return 0;
+
+ min_cdclk = _intel_fbc_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency,
+ * if that is not enough FBC will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ return min_cdclk;
+}
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
struct intel_crtc *crtc,
@@ -2083,6 +2155,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
if (!fbc)
return NULL;
+ fbc->compressed_fb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_fb)
+ goto err;
+ fbc->compressed_llb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_llb)
+ goto err;
+
fbc->id = fbc_id;
fbc->display = display;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -2102,6 +2181,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
fbc->funcs = &i8xx_fbc_funcs;
return fbc;
+
+err:
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ kfree(fbc);
+
+ return NULL;
}
/**
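A note on the WaFbcExceedCdClockThreshold change above: instead of comparing the pixel rate against the logical CDCLK during the atomic check, the constraint is now expressed as a minimum CDCLK, ceil(pixel_rate * 100 / 95), which the new intel_fbc_min_cdclk() can feed into CDCLK selection. A standalone version of that arithmetic; the input value is an example, not taken from a real mode:

#include <stdio.h>

/*
 * WaFbcExceedCdClockThreshold (hsw/bdw): FBC needs the pixel rate to stay
 * below 95% of CDCLK, i.e. CDCLK >= ceil(pixel_rate * 100 / 95). Rates in kHz.
 */
static unsigned int fbc_min_cdclk(unsigned int pixel_rate)
{
	return (pixel_rate * 100u + 94u) / 95u;
}

int main(void)
{
	unsigned int pixel_rate = 533250;	/* example pixel rate in kHz */

	printf("pixel rate %u kHz -> min CDCLK %u kHz\n",
	       pixel_rate, fbc_min_cdclk(pixel_rate));
	return 0;
}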
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 0e715cb6b4e6..91424563206a 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -28,6 +28,7 @@ enum intel_fbc_id {
};
int intel_fbc_atomic_check(struct intel_atomic_state *state);
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state);
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
@@ -52,5 +53,7 @@ void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane);
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index bf5721856f3c..e5449c41cfa1 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -205,6 +205,62 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_set_suspend = intelfb_set_suspend,
};
+static void intel_fbdev_fill_mode_cmd(struct drm_fb_helper_surface_size *sizes,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd->flags = DRM_MODE_FB_MODIFIERS;
+ mode_cmd->width = sizes->surface_width;
+ mode_cmd->height = sizes->surface_height;
+
+ mode_cmd->pitches[0] = intel_fbdev_fb_pitch_align(mode_cmd->width * DIV_ROUND_UP(sizes->surface_bpp, 8));
+ mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd->modifier[0] = DRM_FORMAT_MOD_LINEAR;
+}
+
+static struct intel_framebuffer *
+__intel_fbdev_fb_alloc(struct intel_display *display,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ int size;
+
+ intel_fbdev_fill_mode_cmd(sizes, &mode_cmd);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = intel_fbdev_fb_bo_create(display->drm, size);
+ if (IS_ERR(obj)) {
+ fb = ERR_CAST(obj);
+ goto err;
+ }
+
+ fb = intel_framebuffer_create(obj,
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
+ if (IS_ERR(fb)) {
+ intel_fbdev_fb_bo_destroy(obj);
+ goto err;
+ }
+
+ drm_gem_object_put(obj);
+
+ return to_intel_framebuffer(fb);
+
+err:
+ return ERR_CAST(fb);
+
+}
+
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -235,7 +291,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
- fb = intel_fbdev_fb_alloc(helper, sizes);
+
+ fb = __intel_fbdev_fb_alloc(display, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
@@ -275,7 +332,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display->drm, info, obj, vma);
if (ret)
goto out_unpin;
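For the fbdev allocation path above, the buffer geometry reduces to two alignment steps: the stride is the width in bytes rounded up to 64 (intel_fbdev_fb_pitch_align()), and the object size is stride * height rounded up to a page. A standalone sketch of that arithmetic with example values:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* round v up to the next multiple of a (a must be a power of two) */
static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int width = 1366, height = 768, bpp = 32;	 /* example mode */
	unsigned int stride = align_up(width * (bpp / 8), 64);	 /* pitch alignment */
	unsigned int size = align_up(stride * height, PAGE_SIZE); /* BO size */

	printf("stride %u bytes, size %u bytes\n", stride, size);
	return 0;
}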
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 210aee9ae88b..0838fdd37254 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -3,40 +3,22 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct intel_display *display = to_intel_display(helper->dev);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+ return ALIGN(stride, 64);
+}
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct drm_i915_private *dev_priv = to_i915(drm);
+ struct drm_i915_gem_object *obj;
obj = ERR_PTR(-ENODEV);
if (HAS_LMEM(dev_priv)) {
@@ -51,31 +33,29 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
+ if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
- drm_get_format_info(display->drm,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- i915_gem_object_put(obj);
+ return &obj->base;
+}
- return to_intel_framebuffer(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -107,7 +87,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(display->drm,
+ drm_err(drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index cb7957272715..fd0b3775dc1f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -6,16 +6,18 @@
#ifndef __INTEL_FBDEV_FB_H__
#define __INTEL_FBDEV_FB_H__
-struct drm_fb_helper;
-struct drm_fb_helper_surface_size;
+#include <linux/types.h>
+
+struct drm_device;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct fb_info;
struct i915_vma;
-struct intel_display;
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+u32 intel_fbdev_fb_pitch_align(u32 stride);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj);
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 59a36b3a22c1..5bb0090dd5ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -9,13 +9,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index 6ab2272ab2df..f162614a925d 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -7,16 +7,16 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
-#include "intel_step.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
-#include "intel_flipq.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dsb.h"
+#include "intel_flipq.h"
+#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 531ee122bf82..7195e8cf671c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -19,9 +19,9 @@
#include <drm/intel/i915_component.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 6a22862d6be1..17aa5e0cd51a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
struct intel_hdcp_gsc_context {
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 4ab7e2e3bfd4..5c637341b210 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -45,7 +45,6 @@
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -55,6 +54,7 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
@@ -68,6 +68,20 @@
#include "intel_snps_phy.h"
#include "intel_vrr.h"
+bool intel_hdmi_is_frl(u32 clock)
+{
+ switch (clock) {
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index dec2ad7dd8a2..be2fad57e4ad 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -60,6 +60,7 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
+bool intel_hdmi_is_frl(u32 clock);
void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 4451a792600a..3e508bb9ecdd 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -28,12 +28,12 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_utils.h"
#include "intel_connector.h"
-#include "intel_display_power.h"
#include "intel_display_core.h"
+#include "intel_display_power.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 4f72f3fb9af5..46c47b3d6f42 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -6,11 +6,11 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -420,6 +420,9 @@ u32 i9xx_hpd_irq_ack(struct intel_display *display)
u32 hotplug_status = 0, hotplug_status_mask;
int i;
+ if (!HAS_HOTPLUG(display))
+ return 0;
+
if (display->platform.g4x ||
display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index f52dee0ea412..d2862de894fa 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -20,6 +20,7 @@
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+#include "intel_vdsc.h"
static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
const struct intel_crtc *crtc)
@@ -55,7 +56,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
enum pipe pipe;
- limits->force_fec_pipes = 0;
+ limits->link_dsc_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
@@ -65,8 +66,8 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
- if (crtc_state->fec_enable)
- limits->force_fec_pipes |= BIT(pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ limits->link_dsc_pipes |= BIT(pipe);
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
@@ -265,10 +266,10 @@ assert_link_limit_change_valid(struct intel_display *display,
bool bpps_changed = false;
enum pipe pipe;
- /* FEC can't be forced off after it was forced on. */
+ /* DSC can't be disabled after it was enabled. */
if (drm_WARN_ON(display->drm,
- (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
- old_limits->force_fec_pipes))
+ (old_limits->link_dsc_pipes & new_limits->link_dsc_pipes) !=
+ old_limits->link_dsc_pipes))
return false;
for_each_pipe(display, pipe) {
@@ -286,8 +287,8 @@ assert_link_limit_change_valid(struct intel_display *display,
/* At least one limit must change. */
if (drm_WARN_ON(display->drm,
!bpps_changed &&
- new_limits->force_fec_pipes ==
- old_limits->force_fec_pipes))
+ new_limits->link_dsc_pipes ==
+ old_limits->link_dsc_pipes))
return false;
return true;
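The assertion rewritten above encodes a one-way rule: once link DSC has been enabled for a pipe during the atomic check, a later pass may add pipes to the mask but never drop one, which is exactly the subset test (old & new) == old. A tiny standalone check of that invariant; the u8 masks mirror link_dsc_pipes, and the pipe values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* valid only if every previously-DSC pipe is still in the new mask */
static bool dsc_pipes_change_valid(uint8_t old_mask, uint8_t new_mask)
{
	return (old_mask & new_mask) == old_mask;
}

int main(void)
{
	printf("%d\n", dsc_pipes_change_valid(0x3, 0x7));	/* pipe added: valid */
	printf("%d\n", dsc_pipes_change_valid(0x3, 0x1));	/* pipe dropped: invalid */
	return 0;
}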
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index 95ab7c50c61d..cb18e171037c 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -15,7 +15,7 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
- u8 force_fec_pipes;
+ u8 link_dsc_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d56026c4efdd..9ceabbc981a1 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -31,10 +31,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
new file mode 100644
index 000000000000..af48d6cde226
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -0,0 +1,2000 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_cx0_phy.h"
+#include "intel_cx0_phy_regs.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dpll_mgr.h"
+#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
+#include "intel_lt_phy_regs.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
+#include "intel_tc.h"
+
+#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
+ for ((__lane) = 0; (__lane) < 2; (__lane)++) \
+ for_each_if((__lane_mask) & BIT(__lane))
+
+#define INTEL_LT_PHY_LANE0 BIT(0)
+#define INTEL_LT_PHY_LANE1 BIT(1)
+#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\
+ INTEL_LT_PHY_LANE0)
+#define MODE_DP 3
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
+ .clock = 162000,
+ .config = {
+ 0x83,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x5, 0xa, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x4, 0x4, 0x82, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
+ .clock = 270000,
+ .config = {
+ 0x8b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x81, 0xad },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
+ .clock = 540000,
+ .config = {
+ 0x93,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0xa, 0x4, 0x81, 0xda },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
+ .clock = 810000,
+ .config = {
+ 0x9b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
+ .clock = 1000000,
+ .config = {
+ 0x43,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
+ .clock = 1350000,
+ .config = {
+ 0xcb,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x9, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x80, 0xe0 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
+ .clock = 2000000,
+ .config = {
+ 0x53,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_dp_hbr3,
+ &xe3plpd_lt_dp_uhbr10,
+ &xe3plpd_lt_dp_uhbr13_5,
+ &xe3plpd_lt_dp_uhbr20,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
+ .clock = 216000,
+ .config = {
+ 0xa3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
+ .clock = 243000,
+ .config = {
+ 0xab,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2f, 0x60 },
+ { 0xb0, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x13, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x47, 0x48, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
+ .clock = 324000,
+ .config = {
+ 0xb3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x8a, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
+ .clock = 432000,
+ .config = {
+ 0xbb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0xc, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
+ .clock = 675000,
+ .config = {
+ 0xdb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_edp_2_16,
+ &xe3plpd_lt_edp_2_43,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_edp_3_24,
+ &xe3plpd_lt_edp_4_32,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_edp_6_75,
+ &xe3plpd_lt_dp_hbr3,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
+ .clock = 25200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0c, 0x15, 0x27, 0x60 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x98, 0x28 },
+ { 0x42, 0x0, 0x84, 0x10 },
+ { 0x80, 0x0f, 0xd9, 0xb5 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
+ .clock = 27200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0b, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x96, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
+ .clock = 74250,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x4, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x88, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
+ .clock = 148500,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x84, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
+ .clock = 594000,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0, 0x95, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
+ &xe3plpd_lt_hdmi_252,
+ &xe3plpd_lt_hdmi_272,
+ &xe3plpd_lt_hdmi_742p5,
+ &xe3plpd_lt_hdmi_1p485,
+ &xe3plpd_lt_hdmi_5p94,
+ NULL,
+};
+
+static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
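+	/*
+	 * In DP alt mode with a max lane count of 2 or fewer only PHY lane 0
+	 * is owned by the display; otherwise both PHY lanes are owned.
+	 */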
+ if (!intel_tc_port_in_dp_alt_mode(dig_port))
+ return INTEL_LT_PHY_BOTH_LANES;
+
+ return intel_tc_port_max_lane_count(dig_port) > 2
+ ? INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
+}
+
+static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
+{
+ return intel_cx0_read(encoder, lane_mask, addr);
+}
+
+static void intel_lt_phy_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
+{
+ intel_cx0_write(encoder, lane_mask, addr, data, committed);
+}
+
+static void intel_lt_phy_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+{
+ intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
+}
+
+static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
+ int lane)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_rmw(display,
+ XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY, 0);
+}
+
+static void
+assert_dc_off(struct intel_display *display)
+{
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
+ drm_WARN_ON(display->drm, !enabled);
+}
+
+static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int ack;
+ u32 val;
+
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
+ phy_name(phy));
+ intel_cx0_bus_reset(encoder, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);
+
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING |
+ XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
+ XELPDP_PORT_M2P_DATA(data) |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ if (ack < 0)
+ return ack;
+
+ if (val & XELPDP_PORT_P2M_ERROR_SET) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
+ phy_name(phy), val);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+ intel_cx0_bus_reset(encoder, lane);
+ return -EINVAL;
+ }
+
+	/*
+	 * RE-VISIT:
+	 * This delay gives the PHY time to set everything up; it was required
+	 * to get the display up and running. It is the time the PHY takes to
+	 * settle down after being programmed.
+	 */
+ udelay(150);
+ intel_clear_response_ready_flag(encoder, lane);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+
+ return 0;
+}
+
+static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int i, status;
+
+ assert_dc_off(display);
+
+ /* 3 tries is assumed to be enough to write successfully */
+ for (i = 0; i < 3; i++) {
+ status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
+ expected_mac_val);
+
+ if (status == 0)
+ return;
+ }
+
+ drm_err_once(display->drm,
+ "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
+}
+
+static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ int lane;
+
+ for_each_lt_phy_lane_in_mask(lane_mask, lane)
+ __intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
+}
+
+static void
+intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
+{
+	/*
+	 * The new PORT_BUF_CTL6 handling for DC5 entry and exit needs to be
+	 * done by the DMC firmware and is not explicitly mentioned in Bspec.
+	 * That leaves this function as a plain wrapper, but keep it in
+	 * anticipation of future changes.
+	 */
+ intel_cx0_setup_powerdown(encoder);
+}
+
+static void
+intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
+{
+ intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
+}
+
+static void
+intel_lt_phy_lane_reset(struct intel_encoder *encoder,
+ u8 lane_count)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);
+
+ intel_lt_phy_setup_powerdown(encoder, lane_count);
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_RESET);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, 0);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_US,
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS, NULL))
+		drm_warn(display->drm, "PHY %c PLL MacCLK Ack assertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_MS * 1000);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_pipe_reset | lane_phy_pulse_status, 0);
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status, 0,
+ XE3PLPD_RESET_END_LATENCY_US, 2, NULL))
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of Lane reset after %dus.\n",
+ phy_name(phy), XE3PLPD_RESET_END_LATENCY_US);
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status, lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
+ phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
+}
+
+static void
+intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool lane_reversal)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = 0;
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
+ XELPDP_PORT_REVERSAL,
+ lane_reversal ? XELPDP_PORT_REVERSAL : 0);
+
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
+	/*
+	 * We actually mean MACCLK here, not MAXPCLK, when using the LT PHY,
+	 * but since the register bits remain the same we reuse the same
+	 * definition.
+	 */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ intel_hdmi_is_frl(crtc_state->port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ else
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+
+	/* DP 2.0 10G and 20G rates enable MPLLA */
+ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
+ val |= XELPDP_SSC_ENABLE_PLLA;
+ else
+ val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static u32 intel_lt_phy_get_dp_clock(u8 rate)
+{
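+	/*
+	 * Map the LT PHY VDR rate encoding to the link rate in kHz: 0-3 are
+	 * the DP RBR/HBR1/HBR2/HBR3 rates, 4-7 and 11 the intermediate eDP
+	 * rates, and 8-10 the UHBR10/13.5/20 rates.
+	 */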
+ switch (rate) {
+ case 0:
+ return 162000;
+ case 1:
+ return 270000;
+ case 2:
+ return 540000;
+ case 3:
+ return 810000;
+ case 4:
+ return 216000;
+ case 5:
+ return 243000;
+ case 6:
+ return 324000;
+ case 7:
+ return 432000;
+ case 8:
+ return 1000000;
+ case 9:
+ return 1350000;
+ case 10:
+ return 2000000;
+ case 11:
+ return 675000;
+ default:
+ MISSING_CASE(rate);
+ return 0;
+ }
+}
+
+static bool
+intel_lt_phy_config_changed(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val, rate;
+ u32 clock;
+
+ val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_0_CONFIG);
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
+
+	/*
+	 * The only time we do not reconfigure the PLL is when we are using
+	 * the 1.62 Gbps link rate, since the PHY PLL defaults to that;
+	 * otherwise we always need to reconfigure it.
+	 */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ clock = intel_lt_phy_get_dp_clock(rate);
+		if (crtc_state->port_clock == 162000 && crtc_state->port_clock == clock)
+ return false;
+ }
+
+ return true;
+}
+
+static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+
+ intel_psr_pause(intel_dp);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
+
+ return wakeref;
+}
+
+static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+static const struct intel_lt_phy_pll_state * const *
+intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return xe3plpd_lt_edp_tables;
+
+ return xe3plpd_lt_dp_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return xe3plpd_lt_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+static bool
+intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_panel_use_ssc(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+ }
+ }
+
+ return false;
+}
+
+static int
+intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
+{
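+/* Reference clock in kHz used in the PLL frequency calculation below */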
+#define REF_CLK_KHZ 38400
+#define REGVAL(i) ( \
+ (lt_state->data[i][3]) | \
+ (lt_state->data[i][2] << 8) | \
+ (lt_state->data[i][1] << 16) | \
+ (lt_state->data[i][0] << 24) \
+)
+
+ int clk = 0;
+ u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
+ u64 temp0, temp1;
+ /*
+ * The algorithm uses '+' to combine bitfields when
+ * constructing PLL_reg3 and PLL_reg57:
+ * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new;
+ * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5);
+ *
+ * However, this is likely intended to be a bitwise OR operation,
+ * as each field occupies distinct, non-overlapping bits in the register.
+ *
+ * PLL_reg57 is composed of following fields packed into a 32-bit value:
+ * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
+ * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
+ * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
+ * (though 8 bits are given to it) -> placed at bits 7-14
+ * - D6_new: fits in lower 7 bits -> placed at bits 0-6
+ * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
+ *
+ * Similarly, PLL_reg3 is packed as:
+ * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29
+ * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21
+ * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16
+ * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
+ * -> placed at bits 5-14
+ * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
+ */
+ pll_reg_5 = REGVAL(2);
+ pll_reg_3 = REGVAL(1);
+ pll_reg_57 = REGVAL(3);
+ m2div_frac = pll_reg_5;
+
+ /*
+	 * From the forward algorithm we know:
+ * m2div = 2 * m2
+ * val = y * frequency * 5
+ * So now,
+ * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
+ * frequency = (m2div * refclk_khz / (d8 * 10))
+ */
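+	/*
+	 * For example, the 594 MHz HDMI table entry above has m2div_int = 309,
+	 * m2div_frac = 0x60000000 (i.e. 0.375) and d8 = 2, which gives
+	 * (309 * 38400 + 14400) / 20 = 594000 kHz.
+	 */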
+ d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
+ m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
+ temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
+ temp1 = (u64)m2div_int * REF_CLK_KHZ;
+ if (d8 == 0)
+ return 0;
+
+ clk = div_u64((temp1 + temp0), d8 * 10);
+
+ return clk;
+}
+
+int
+intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ int clk;
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
+ u8 mode, rate;
+
+ mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
+ lt_state->config[0]);
+	/*
+	 * For eDP/DP, read the clock value from the tables and return it,
+	 * since the algorithm used for calculating the port clock does not
+	 * exactly match the eDP/DP clock.
+	 */
+ if (mode == MODE_DP) {
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK,
+ lt_state->config[0]);
+ clk = intel_lt_phy_get_dp_clock(rate);
+ } else {
+ clk = intel_lt_phy_calc_hdmi_port_clock(lt_state);
+ }
+
+ return clk;
+}
+
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_lt_phy_pll_state * const *tables;
+ int i;
+
+ tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->clock) {
+ crtc_state->dpll_hw_state.ltpll = *tables[i];
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ crtc_state->dpll_hw_state.ltpll.config[2] = 1;
+ }
+ crtc_state->dpll_hw_state.ltpll.ssc_enabled =
+ intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
+ return 0;
+ }
+ }
+
+	/* TODO: Add a function to compute the data for HDMI TMDS */
+
+ return -EINVAL;
+}
+
+static void
+intel_lt_phy_program_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ int i, j, k;
+
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
+
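+	/*
+	 * Program the 13 PLL register tuples: for each entry write the target
+	 * address (MSB and LSB) followed by the four data bytes, most
+	 * significant byte first.
+	 */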
+ for (i = 0; i <= 12; i++) {
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_msb[i],
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
+ MB_WRITE_COMMITTED);
+
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j),
+ crtc_state->dpll_hw_state.ltpll.data[i][k],
+ MB_WRITE_COMMITTED);
+ }
+}
+
+static void
+intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 lane_count = crtc_state->lane_count;
+ bool is_dp_alt =
+ intel_tc_port_in_dp_alt_mode(dig_port);
+ enum intel_tc_pin_assignment tc_pin =
+ intel_tc_port_get_pin_assignment(dig_port);
+ u8 transmitter_mask = 0;
+
+	/*
+	 * We have two transmitters per PHY lane and a total of 2 PHY lanes, so
+	 * a total of 4 transmitters. We prepare a mask of the lanes that need
+	 * to be activated and the transmitters which need to be activated for
+	 * each lane. TX 0,1 correspond to LANE0 and TX 2,3 correspond to LANE1.
+	 */
+
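+	/*
+	 * For example, a 2 lane, non-reversed, non-DP-alt configuration
+	 * enables TX0 and TX1, i.e. both transmitters on PHY LANE0.
+	 */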
+ switch (lane_count) {
+ case 1:
+ transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0);
+ if (is_dp_alt) {
+ if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D)
+ transmitter_mask = REG_BIT8(0);
+ else
+ transmitter_mask = REG_BIT8(1);
+ }
+ break;
+ case 2:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(1, 0);
+ break;
+ case 3:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(2, 0);
+ break;
+ case 4:
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ }
+
+ if (transmitter_mask & BIT(0)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(1)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+
+ if (transmitter_mask & BIT(2)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(3)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+}
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref = 0;
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+ u8 rate_update;
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Enable MacCLK at default 162 MHz frequency. */
+ intel_lt_phy_lane_reset(encoder, crtc_state->lane_count);
+
+ /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
+ intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+
+ /* 3. Change owned PHY lanes power to Ready state. */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_READY);
+
+ /*
+ * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type,
+ * encoded rate and encoded mode.
+ */
+ if (intel_lt_phy_config_changed(encoder, crtc_state)) {
+ /*
+ * 5. Program the PHY internal PLL registers over PHY message bus for the desired
+ * frequency and protocol type
+ */
+ intel_lt_phy_program_pll(encoder, crtc_state);
+
+ /* 6. Use the P2P transaction flow */
+ /*
+ * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message
+ * bus for Owned PHY Lanes.
+ */
+ /*
+ * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR
+ * register at offset 0xC00 for Owned PHY Lanes*.
+ */
+ /* 6.3. Clear P2P transaction Ready bit. */
+ intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR,
+ LT_PHY_PCLKIN_GATE);
+
+ /* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0), 0,
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
+ * Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
+ crtc_state->port_clock);
+
+ /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_US, 2, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack assertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_US);
+
+ /*
+ * 13. Ungate the forward clock by setting
+ * PORT_CLOCK_CTL[Forward Clock Ungate] = 1.
+ */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ /*
+ * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over
+ * PHY message bus for Owned PHY Lanes.
+ */
+ rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE);
+ rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE;
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ rate_update, MB_WRITE_COMMITTED);
+
+ /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status, lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 2, NULL))
+ drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
+ phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+
+ /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ } else {
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock);
+ }
+
+ /*
+ * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change.
+ * We handle this step in bxt_set_cdclk()
+ */
+ /* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P0_STATE_ACTIVE);
+
+ intel_lt_phy_enable_disable_tx(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PIPE_RESET(0) |
+ XELPDP_LANE_PIPE_RESET(1))
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
+ lane_pipe_reset);
+
+ /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ lane_phy_current_status,
+ XE3PLPD_RESET_START_LATENCY_US, 0, NULL))
+ drm_warn(display->drm,
+ "PHY %c failed to reset Lane after %dms.\n",
+ phy_name(phy), XE3PLPD_RESET_START_LATENCY_US);
+
+ /* 4. Clear for PHY pulse status on owned PHY lanes. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /*
+ * 5. Follow the Display Voltage Frequency Switching -
+ * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 7. Program DDI_CLK_VALFREQ to 0. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0), 0,
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching -
+ * Sequence After Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0);
+
+ /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0);
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_ddi_buf_trans *trans;
+ u8 owned_lane_mask;
+ intel_wakeref_t wakeref;
+ int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(display->drm, !trans)) {
+ intel_lt_phy_transaction_end(encoder, wakeref);
+ return;
+ }
+
+ for (ln = 0; ln < crtc_state->lane_count; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+ int lane = ln / 2;
+ int tx = ln % 2;
+ u8 lane_mask = lane == 0 ? INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1;
+
+ if (!(lane_mask & owned_lane_mask))
+ continue;
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx),
+ LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK,
+ LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) |
+ LT_PHY_TX_SWING(trans->entries[level].lt.txswing),
+ MB_WRITE_COMMITTED);
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor),
+ MB_WRITE_COMMITTED);
+ }
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state)
+{
+ int i, j;
+
+ drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
+ for (i = 0; i < 3; i++) {
+ drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
+ i, hw_state->config[i]);
+ }
+
+ for (i = 0; i <= 12; i++)
+ for (j = 3; j >= 0; j--)
+ drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
+ i, j, hw_state->data[i][j]);
+}
+
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ if (memcmp(&a->config, &b->config, sizeof(a->config)) != 0)
+ return false;
+
+ if (memcmp(&a->data, &b->data, sizeof(a->data)) != 0)
+ return false;
+
+ return true;
+}
+
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state)
+{
+ u8 owned_lane_mask;
+ u8 lane;
+ intel_wakeref_t wakeref;
+ int i, j, k;
+
+ pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
+ if (pll_state->tbt_mode)
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
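+	/* Read from PHY lane 0 if owned, otherwise fall back to lane 1. */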
+ lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG);
+ pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
+ pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG);
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ pll_state->data[i][k] =
+ intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j));
+ }
+
+ pll_state->clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_digital_port *dig_port;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+ struct intel_lt_phy_pll_state pll_hw_state = {};
+ const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
+ int clock;
+ int i, j;
+
+ if (DISPLAY_VER(display) < 35)
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state);
+ clock = intel_lt_phy_calc_port_clock(encoder, new_crtc_state);
+
+ dig_port = enc_to_dig_port(encoder);
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock,
+ "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ pll_sw_state->clock, pll_hw_state.clock);
+
+ for (i = 0; i < 3; i++) {
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ pll_sw_state->config[i], pll_hw_state.config[i]);
+ }
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3; j >= 0; j--)
+ INTEL_DISPLAY_STATE_WARN(display,
+ pll_hw_state.data[i][j] !=
+ pll_sw_state->data[i][j],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i, j,
+ pll_sw_state->data[i][j], pll_hw_state.data[i][j]);
+ }
+}
+
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_enable(encoder, crtc_state);
+ else
+ intel_lt_phy_pll_enable(encoder, crtc_state);
+}
+
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_disable(encoder);
+ else
+ intel_lt_phy_pll_disable(encoder);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
new file mode 100644
index 000000000000..a538d4c69210
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_H__
+#define __INTEL_LT_PHY_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_display;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_crtc;
+struct intel_lt_phy_pll_state;
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state);
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b);
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state);
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
+
+#define HAS_LT_PHY(display) (DISPLAY_VER(display) >= 35)
+
+#endif /* __INTEL_LT_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
new file mode 100644
index 000000000000..9223487d764e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_REGS_H__
+#define __INTEL_LT_PHY_REGS_H__
+
+#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
+#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 1
+#define XE3PLPD_MACCLK_TURNON_LATENCY_US 21
+#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
+#define XE3PLPD_RATE_CALIB_DONE_LATENCY_US 50
+#define XE3PLPD_RESET_START_LATENCY_US 10
+#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
+#define XE3PLPD_RESET_END_LATENCY_US 200
+
+/* LT Phy MAC Register */
+#define LT_PHY_MAC_VDR _MMIO(0xC00)
+#define LT_PHY_PCLKIN_GATE REG_BIT8(0)
+
+/* LT Phy Pipe Spec Registers */
+#define LT_PHY_TXY_CTL8(idx) (0x408 + (0x200 * (idx)))
+#define LT_PHY_TX_SWING_LEVEL_MASK REG_GENMASK8(7, 4)
+#define LT_PHY_TX_SWING_LEVEL(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_LEVEL_MASK, val)
+#define LT_PHY_TX_SWING_MASK REG_BIT8(3)
+#define LT_PHY_TX_SWING(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_MASK, val)
+
+#define LT_PHY_TXY_CTL2(idx) (0x402 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL3(idx) (0x403 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL4(idx) (0x404 + (0x200 * (idx)))
+#define LT_PHY_TX_CURSOR_MASK REG_GENMASK8(5, 0)
+#define LT_PHY_TX_CURSOR(val) REG_FIELD_PREP8(LT_PHY_TX_CURSOR_MASK, val)
+
+#define LT_PHY_TXY_CTL10(idx) (0x40A + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL10_MAC(idx) _MMIO(LT_PHY_TXY_CTL10(idx))
+#define LT_PHY_TX_LANE_ENABLE REG_BIT8(0)
+
+/* LT Phy Vendor Register */
+#define LT_PHY_VDR_0_CONFIG 0xC02
+#define LT_PHY_VDR_DP_PLL_ENABLE REG_BIT(7)
+#define LT_PHY_VDR_1_CONFIG 0xC03
+#define LT_PHY_VDR_RATE_ENCODING_MASK REG_GENMASK8(6, 3)
+#define LT_PHY_VDR_MODE_ENCODING_MASK REG_GENMASK8(2, 0)
+#define LT_PHY_VDR_2_CONFIG 0xCC3
+
+#define LT_PHY_VDR_X_ADDR_MSB(idx) (0xC04 + 0x6 * (idx))
+#define LT_PHY_VDR_X_ADDR_LSB(idx) (0xC05 + 0x6 * (idx))
+
+#define LT_PHY_VDR_X_DATAY(idx, y) ((0xC06 + (3 - (y))) + 0x6 * (idx))
+
+#define LT_PHY_RATE_UPDATE 0xCC4
+#define LT_PHY_RATE_CONTROL_VDR_UPDATE REG_BIT8(0)
+
+#define _XE3PLPD_PORT_BUF_CTL5(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2) \
+ + 0x34)
+#define XE3PLPD_PORT_BUF_CTL5(port) _XE3PLPD_PORT_BUF_CTL5(__xe2lpd_port_idx(port))
+#define XE3PLPD_MACCLK_RESET_0 REG_BIT(11)
+#define XE3PLPD_MACCLK_RATE_MASK REG_GENMASK(4, 0)
+#define XE3PLPD_MACCLK_RATE_DEF REG_FIELD_PREP(XE3PLPD_MACCLK_RATE_MASK, 0x1F)
+
+#define _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) \
+ + 0x60 + (lane) * 0x4)
+#define XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(port, lane) _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(__xe2lpd_port_idx(port), \
+ lane)
+#define XE3LPD_PORT_P2M_ADDR_MASK REG_GENMASK(11, 0)
+#endif /* __INTEL_LT_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 8415f3d703ed..0dcb0597879a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -19,6 +19,7 @@
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_dbuf_bw.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -176,6 +177,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
intel_cdclk_crtc_disable_noatomic(crtc);
skl_wm_crtc_disable_noatomic(crtc);
intel_bw_crtc_disable_noatomic(crtc);
+ intel_dbuf_bw_crtc_disable_noatomic(crtc);
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
@@ -851,18 +853,23 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ crtc_state->plane_min_cdclk[plane->id]);
}
+ crtc_state->min_cdclk = intel_crtc_min_cdclk(crtc_state);
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] min_cdclk %d kHz\n",
+ crtc->base.base.id, crtc->base.name, crtc_state->min_cdclk);
+
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
}
@@ -872,6 +879,7 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
+ intel_dbuf_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index f2f6b9d9afa1..b361a77cd235 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -16,6 +16,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_lt_phy.h"
#include "intel_modeset_verify.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
@@ -246,6 +247,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
+ intel_lt_phy_pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
index 469e8a3cfb49..65359a36df48 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.c
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -5,8 +5,8 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_utils.h"
#include "intel_pch.h"
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -328,7 +328,7 @@ void intel_pch_detect(struct intel_display *display)
"Display disabled, reverting to NOP PCH\n");
display->pch_type = PCH_NOP;
} else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ if (intel_display_run_as_guest(display) && HAS_DISPLAY(display)) {
intel_virt_detect_pch(display, &id, &pch_type);
display->pch_type = pch_type;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9ae53679a041..cca880c7eed4 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -6,10 +6,10 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 68539e7c2a24..6dda496190e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
#include "intel_pfit_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 2329f09d413d..78329deb395a 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -45,7 +45,6 @@
#include <drm/drm_panic.h>
#include "gem/i915_gem_object.h"
-#include "i915_scheduler_types.h"
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
@@ -292,64 +291,21 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate);
}
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc)
+static void intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return 0;
+ return;
- old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->min_cdclk[plane->id] =
+ new_crtc_state->plane_min_cdclk[plane->id] =
plane->min_cdclk(new_crtc_state, plane_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk for the plane doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- old_crtc_state->min_cdclk[plane->id])
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk for the pipe doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
- return 0;
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- new_crtc_state->min_cdclk[plane->id],
- crtc->base.base.id, crtc->base.name,
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
- *need_cdclk_calc = true;
-
- return 0;
}
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
@@ -435,7 +391,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ crtc_state->plane_min_cdclk[plane->id] = 0;
plane_state->uapi.visible = false;
}
@@ -1172,7 +1128,6 @@ static int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *new_plane_state =
@@ -1221,8 +1176,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
goto unpin_fb;
if (new_plane_state->uapi.fence) {
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
+ i915_gem_fence_wait_priority_display(new_plane_state->uapi.fence);
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
@@ -1746,5 +1700,8 @@ int intel_plane_atomic_check(struct intel_atomic_state *state)
return ret;
}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ intel_plane_calc_min_cdclk(state, plane);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 8af41ccc0a69..4e99df9de3e8 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -69,9 +69,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc);
int intel_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index d806c15db7ce..f52abd4e2eb0 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -7,13 +7,14 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
+#include "intel_display_utils.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 327e0de86f1e..25692a547764 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -10,11 +10,12 @@
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 10eb93a34cf2..0ca2b06d18b5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -39,6 +39,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -50,6 +51,7 @@
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
+#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -580,6 +582,44 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
+static enum intel_panel_replay_dsc_support
+compute_pr_dsc_support(struct intel_dp *intel_dp)
+{
+ u8 pr_dsc_mode;
+ u8 val;
+
+ val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
+ pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);
+
+ switch (pr_dsc_mode) {
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
+ return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
+ return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
+ default:
+ MISSING_CASE(pr_dsc_mode);
+ fallthrough;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
+ return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
+ }
+}
+
+static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
+{
+ switch (dsc_support) {
+ case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
+ return "not supported";
+ case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
+ return "full frame only";
+ case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
+ return "selective update";
+ default:
+ MISSING_CASE(dsc_support);
+ return "n/a";
+ };
+}
+
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -615,10 +655,13 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
+ intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp);
+
drm_dbg_kms(display->drm,
- "Panel replay %sis supported by panel\n",
+ "Panel replay %sis supported by panel (in DSC mode: %s)\n",
intel_dp->psr.sink_panel_replay_su_support ?
- "selective_update " : "");
+ "selective_update " : "",
+ panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support));
}
static void _psr_init_dpcd(struct intel_dp *intel_dp)
@@ -956,15 +999,16 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static int psr2_block_count_lines(struct intel_dp *intel_dp)
+static int
+psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
{
- return intel_dp->alpm_parameters.io_wake_lines < 9 &&
- intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
+ return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
{
- return psr2_block_count_lines(intel_dp) / 4;
+ return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
+ intel_dp->psr.fast_wake_lines) / 4;
}
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
@@ -1059,20 +1103,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[intel_dp->alpm_parameters.io_wake_lines -
+ tmp = map[intel_dp->psr.io_wake_lines -
TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(display) >= 20) {
- val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
+ val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
} else if (DISPLAY_VER(display) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(display) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -1360,22 +1404,54 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
return entry_setup_frames;
}
-static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+static
+int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
- struct intel_display *display = to_intel_display(intel_dp);
- int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
- crtc_state->hw.adjusted_mode.crtc_vblank_start;
- int wake_lines;
+ struct intel_display *display = to_intel_display(crtc_state);
- if (aux_less)
- wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
+
+ if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
+ needs_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
else
- wake_lines = DISPLAY_VER(display) < 20 ?
- psr2_block_count_lines(intel_dp) :
- intel_dp->alpm_parameters.io_wake_lines;
+ return 1;
+}
+
+static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
+ int vblank,
+ int wake_lines)
+{
if (crtc_state->req_psr2_sdp_prior_scanline)
vblank -= 1;
@@ -1386,9 +1462,46 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
return true;
}
+static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
+ crtc_state->hw.adjusted_mode.crtc_vblank_start;
+ int wake_lines;
+ int scl = _intel_psr_min_set_context_latency(crtc_state,
+ needs_panel_replay,
+ needs_sel_update);
+ vblank -= scl;
+
+ if (aux_less)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+
+ /*
+ * Guardband has not been computed yet, so we conservatively check if the
+ * full vblank duration is sufficient to accommodate wake line requirements
+ * for PSR features like Panel Replay and Selective Update.
+ *
+ * Once the actual guardband is available, a more accurate validation is
+ * performed in intel_psr_compute_config_late(), and PSR features are
+ * disabled if wake lines exceed the available guardband.
+ */
+ return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
+}
+
static bool alpm_config_valid(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+ struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1398,7 +1511,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
+ if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
+ needs_panel_replay, needs_sel_update)) {
drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
@@ -1490,7 +1604,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, false))
+ if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
return false;
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -1535,9 +1649,21 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
- !intel_dp->psr.sink_panel_replay_su_support))
- goto unsupported;
+ if (crtc_state->has_panel_replay) {
+ if (DISPLAY_VER(display) < 14)
+ goto unsupported;
+
+ if (!intel_dp->psr.sink_panel_replay_su_support)
+ goto unsupported;
+
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support !=
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
+ drm_dbg_kms(display->drm,
+ "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
+ goto unsupported;
+ }
+ }
if (crtc_state->crc_enabled) {
drm_dbg_kms(display->drm,
@@ -1582,6 +1708,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
+ crtc_state->no_psr_reason = "PSR setup timing not met";
drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
@@ -1592,7 +1719,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
static bool
_panel_replay_compute_config(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1614,6 +1741,14 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it's not supported with DSC\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1641,7 +1776,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, true))
+ if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
return false;
return true;
@@ -1656,15 +1791,40 @@ static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
!crtc_state->has_sel_update);
}
+static
+void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
+
+ /* Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_crtc *crtc;
- u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1694,6 +1854,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /* Only used for state verification. */
+ crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support;
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1705,31 +1867,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
-
- /* Wa_18037818876 */
- if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
- crtc_state->has_psr = false;
- drm_dbg_kms(display->drm,
- "PSR disabled to workaround PSR FSM hang issue\n");
- }
-
- /* Rest is for Wa_16025596647 */
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
- return;
-
- /* Not needed by Panel Replay */
- if (crtc_state->has_panel_replay)
- return;
-
- /* We ignore possible secondary PSR/Panel Replay capable eDP */
- for_each_intel_crtc(display->drm, crtc)
- active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
-
- active_pipes = intel_calc_active_pipes(state, active_pipes);
-
- crtc_state->active_non_psr_pipes = active_pipes &
- ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1813,6 +1950,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp);
intel_dp->psr.active = true;
+ intel_dp->psr.no_psr_reason = NULL;
}
/*
@@ -2022,6 +2160,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
crtc_state->req_psr2_sdp_prior_scanline;
intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
+ intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
+ intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2360,50 +2500,17 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
}
/**
- * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
* @crtc_state: the crtc state
*
- * Return minimum vblank delay needed by PSR.
+ * Return minimum SCL lines/delay needed by PSR.
*/
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- if (!crtc_state->has_psr)
- return 0;
-
- /* Wa_14015401596 */
- if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
- return 1;
-
- /* Rest is for SRD_STATUS needed on LunarLake and onwards */
- if (DISPLAY_VER(display) < 20)
- return 0;
-
- /*
- * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
- *
- * To deterministically capture the transition of the state machine
- * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
- * one line after the non-delayed V. Blank.
- *
- * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
- * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
- * - TRANS_VTOTAL[ Vertical Active ])
- *
- * SRD_STATUS is used only by PSR1 on PantherLake.
- * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
- */
-
- if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
- crtc_state->has_sel_update))
- return 0;
- else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
- intel_crtc_has_type(crtc_state,
- INTEL_OUTPUT_EDP)))
- return 0;
- else
- return 1;
+ return _intel_psr_min_set_context_latency(crtc_state,
+ crtc_state->has_panel_replay,
+ crtc_state->has_sel_update);
}
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
@@ -2925,6 +3032,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
+ if (!new_crtc_state->has_psr)
+ psr->no_psr_reason = new_crtc_state->no_psr_reason;
+
if (psr->enabled) {
/*
* Reasons to disable:
@@ -2951,6 +3061,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
}
}
+static void
+verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_panel_replay)
+ return;
+
+ drm_WARN_ON(display->drm,
+ intel_dsc_enabled_on_link(crtc_state) &&
+ crtc_state->panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
+}
+
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2962,6 +3086,8 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
if (!crtc_state->has_psr)
return;
+ verify_panel_replay_dsc_state(crtc_state);
+
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2973,12 +3099,19 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
drm_WARN_ON(display->drm,
psr->enabled && !crtc_state->active_planes);
- keep_disabled |= psr->sink_not_reliable;
- keep_disabled |= !crtc_state->active_planes;
+ if (psr->sink_not_reliable)
+ keep_disabled = true;
+
+ if (!crtc_state->active_planes) {
+ psr->no_psr_reason = "All planes inactive";
+ keep_disabled = true;
+ }
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(display) < 11 &&
- crtc_state->wm_level_disabled;
+ if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
+ psr->no_psr_reason = "Workaround #1136 for skl, bxt";
+ keep_disabled = true;
+ }
if (!psr->enabled && !keep_disabled)
intel_psr_enable_locked(intel_dp, crtc_state);
@@ -3991,6 +4124,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
+ seq_printf(m, ", Panel Replay DSC support = %s",
+ panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support));
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
@@ -4025,6 +4160,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
region_et = "";
seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
+ if (psr->no_psr_reason)
+ seq_printf(m, " %s\n", psr->no_psr_reason);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -4322,3 +4459,84 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
{
return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
}
+
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = intel_crtc_vblank_length(crtc_state);
+ int wake_lines;
+
+ if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (intel_psr_needs_alpm(intel_dp, crtc_state))
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ wake_lines = 0;
+
+ /*
+ * Disable the PSR features if wake lines exceed the available vblank.
+ * Though SCL is computed based on these PSR features, it is not reset
+ * even if the PSR features are disabled to avoid changing vblank start
+ * at this stage.
+ */
+ if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
+ drm_dbg_kms(display->drm,
+ "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
+ wake_lines);
+
+ if (crtc_state->has_panel_replay) {
+ crtc_state->has_panel_replay = false;
+ /*
+ * #TODO: Add fallback to PSR/PSR2
+ * Since Panel Replay cannot be supported, we can fall back to PSR/PSR2.
+ * This will require calling compute_config for PSR and PSR2 with a check
+ * against the actual guardband instead of vblank_length.
+ */
+ crtc_state->has_psr = false;
+ }
+
+ crtc_state->has_sel_update = false;
+ crtc_state->enable_psr2_su_region_et = false;
+ crtc_state->enable_psr2_sel_fetch = false;
+ }
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
+
+ intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
+}
+
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int psr_min_guardband;
+ int wake_lines;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return 0;
+
+ if (crtc_state->has_panel_replay)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (crtc_state->has_sel_update)
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ return 0;
+
+ psr_min_guardband = wake_lines + crtc_state->set_context_latency;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
+ psr_min_guardband++;
+
+ return psr_min_guardband;
+}
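To make the new guardband plumbing easier to follow, here is a minimal standalone sketch of the arithmetic behind intel_psr_min_guardband() above; the function and parameter names are illustrative and not part of the patch.

#include <stdbool.h>

/* Same decision as psr2_block_count_lines(): short wake lines -> 8, else 12. */
static int block_count_lines(int io_wake_lines, int fast_wake_lines)
{
	return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}

/*
 * Guardband floor for a pre-Xe2 eDP panel running selective update:
 * wake lines plus the set context latency, plus one extra line when the
 * PSR2 SDP has to go out prior to the scanline.
 */
static int psr_min_guardband_sketch(int io_wake_lines, int fast_wake_lines,
				    int set_context_latency,
				    bool sdp_prior_scanline)
{
	int guardband = block_count_lines(io_wake_lines, fast_wake_lines) +
		set_context_latency;

	if (sdp_prior_scanline)
		guardband++;

	return guardband; /* e.g. 7/7 wake lines, SCL = 1 -> 8 + 1 = 9 lines */
}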
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 077751aa599f..620b35928832 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -77,11 +77,14 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 600c815e37e4..c05d4beb91d8 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -5,7 +5,7 @@
#include <drm/display/drm_dsc.h>
-#include "i915_utils.h"
+#include "intel_display_utils.h"
#include "intel_qp_tables.h"
/* from BPP 6 to 24 in steps of 0.5 */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 7fe6b4a18213..a201edceee10 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -332,6 +332,8 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6
c10_curve_1, c10_curve_2, prescaler_divider,
&pll_params);
+ pll_state->clock = pixel_clock;
+
pll_state->tx = 0x10;
pll_state->cmn = 0x1;
pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b2dd69a11124..4f028e6a91cd 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -7,12 +7,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
#include "intel_snps_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 75bbaa923204..60f1d9ed181e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -39,10 +39,10 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_plane.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c4a5601c5107..7e17ca018748 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -18,6 +17,7 @@
#include "intel_display_power_map.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
@@ -1703,6 +1703,19 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
mutex_unlock(&tc->lock);
}
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ intel_tc_port_lock(dig_port);
+ drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n",
+ tc->port_name,
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
+ intel_tc_port_unlock(dig_port);
+}
+
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
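With hypothetical values, the new intel_tc_info() helper above would add a line like the following to the i915_display_info dump:

	TC Port D/TC#1: mode: dp-alt, pin assignment: C, max lanes: 4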
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index fff8b96e4972..6719aea5bd58 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_printer;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@@ -113,4 +114,6 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port);
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port);
+
#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index c15234c1d96e..8aa427a93964 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -8,12 +8,13 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
@@ -681,7 +682,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ vblank_delay = crtc_state->set_context_latency;
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
@@ -767,3 +768,13 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
return scanline;
}
+
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable)
+ return crtc_state->vrr.guardband;
+ else
+ return adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
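A quick standalone sketch of what intel_crtc_vblank_length() returns, with made-up timings rather than values from the patch:

#include <stdbool.h>

/*
 * With VRR off the vblank length is simply vtotal - vblank_start; with VRR
 * on it is the computed guardband. E.g. a CEA 1080p60 mode with
 * crtc_vtotal = 1125 and crtc_vblank_start = 1080 gives 45 lines.
 */
static int crtc_vblank_length_sketch(bool vrr_enable, int guardband,
				     int crtc_vtotal, int crtc_vblank_start)
{
	if (vrr_enable)
		return guardband;
	else
		return crtc_vtotal - crtc_vblank_start;
}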
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 21fbb08d61d5..98d04cacd65f 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -48,4 +48,6 @@ const struct intel_crtc_state *
intel_pre_commit_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 8e799e225af1..0e727fc5e80c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -11,10 +11,10 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
@@ -372,6 +372,22 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
return 0;
}
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->dsc.compression_enabled_on_link = true;
+ crtc_state->dsc.compression_enable = true;
+}
+
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ drm_WARN_ON(display->drm, crtc_state->dsc.compression_enable &&
+ !crtc_state->dsc.compression_enabled_on_link);
+
+ return crtc_state->dsc.compression_enabled_on_link;
+}
+
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
@@ -1077,3 +1093,11 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
+
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ return 0x18000; /* 1.5 lines in .16 fixed point */
+}
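The 0x18000 returned above is a .16 fixed-point line count (1.5 lines), matching the << 16 convention used by the new skl_prefill code; a minimal sketch (not from the patch) of rounding such a value back up to whole lines:

/* Round a .16 fixed-point line count up to whole lines. */
static unsigned int prefill_fp_to_lines(unsigned int lines_fp)
{
	return (lines_fp + (1u << 16) - 1) >> 16; /* 0x18000 (1.5) -> 2 lines */
}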
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 9e2812f99dd7..99f64ac54b27 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -20,6 +20,8 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state);
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
@@ -32,5 +34,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 3eed37f271b0..00cbc126fb36 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -10,8 +10,11 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_psr.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
+#include "skl_prefill.h"
+#include "skl_watermark.h"
#define FIXED_POINT_PRECISION 100
#define CMRR_PRECISION_TOLERANCE 10
@@ -22,6 +25,9 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
const struct drm_display_info *info = &connector->base.display_info;
struct intel_dp *intel_dp;
+ if (!HAS_VRR(display))
+ return false;
+
/*
* DP Sink is capable of VRR video timings if
* Ignore MSA bit is set in DPCD.
@@ -46,8 +52,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
}
- return HAS_VRR(display) &&
- info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+ return info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
@@ -79,44 +84,42 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
}
}
-static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->hw.adjusted_mode.crtc_vblank_start -
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
-}
-
static int intel_vrr_extra_vblank_delay(struct intel_display *display)
{
/*
* On ICL/TGL VRR hardware inserts one extra scanline
* just after vactive, which pushes the vmin decision
- * boundary ahead accordingly. We'll include the extra
- * scanline in our vblank delay estimates to make sure
- * that we never underestimate how long we have until
- * the delayed vblank has passed.
+ * boundary ahead accordingly, and thus reduces the
+ * max guardband length by one scanline.
*/
return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_vmin_flipline_offset(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_real_vblank_delay(crtc_state) +
- intel_vrr_extra_vblank_delay(display);
+ /*
+ * ICL/TGL hardware imposes flipline>=vmin+1
+ *
+ * We reduce the vmin value to compensate when programming the
+ * hardware. This approach allows flipline to remain set at the
+ * original value, and thus the frame will have the desired
+ * minimum vtotal.
+ */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-static int intel_vrr_flipline_offset(struct intel_display *display)
+static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state,
+ int guardband)
{
- /* ICL/TGL hardware imposes flipline>=vmin+1 */
- return DISPLAY_VER(display) < 13 ? 1 : 0;
+ /* hardware imposes one extra scanline somewhere */
+ return guardband - crtc_state->framestart_delay - 1;
}
-static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state,
+ int pipeline_full)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display);
+ /* hardware imposes one extra scanline somewhere */
+ return pipeline_full + crtc_state->framestart_delay + 1;
}
/*
@@ -135,48 +138,26 @@ static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
*
* framestart_delay is programmable 1-4.
*/
-static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.guardband;
- else
- /* hardware imposes one extra scanline somewhere */
- return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
-}
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
/* Min vblank actually determined by flipline */
- if (DISPLAY_VER(display) >= 13)
- return intel_vrr_vmin_flipline(crtc_state);
- else
- return intel_vrr_vmin_flipline(crtc_state) +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmin;
}
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.vmax;
- else
- return crtc_state->vrr.vmax +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmax;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmin_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmax_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
static bool
@@ -230,7 +211,6 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
static
void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
{
- crtc_state->cmrr.enable = true;
/*
* TODO: Compute precise target refresh rate to determine
* if video_mode_required should be true. Currently set to
@@ -240,52 +220,76 @@ void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
crtc_state->vrr.vmin = crtc_state->vrr.vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
+ crtc_state->cmrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
static
-void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state,
+ int vmin, int vmax)
{
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.vmin = vmin;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
-/*
- * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
- * Vtotal value.
- */
static
-int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /* For fixed rr, vmin = vmax = flipline */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+}
+
+static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state,
+ int value)
{
struct intel_display *display = to_intel_display(crtc_state);
- int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ /*
+ * On TGL vmin/vmax/flipline also need to be
+ * adjusted by the SCL to maintain correct vtotals.
+ */
if (DISPLAY_VER(display) >= 13)
- return crtc_vtotal;
+ return value;
else
- return crtc_vtotal -
- intel_vrr_real_vblank_delay(crtc_state);
+ return value - crtc_state->set_context_latency;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal);
}
static
-int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
static
-int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- return intel_vrr_fixed_rr_vtotal(crtc_state) -
- intel_vrr_flipline_offset(display);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state) -
+ intel_vrr_vmin_flipline_offset(display);
}
static
-int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
@@ -297,22 +301,11 @@ void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- intel_vrr_fixed_rr_flipline(crtc_state) - 1);
-}
-
-static
-void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
-{
- /*
- * For fixed rr, vmin = vmax = flipline.
- * vmin is already set to crtc_vtotal set vmax and flipline the same.
- */
- crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
- crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1);
}
static
@@ -384,60 +377,131 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
vmax = vmin;
}
- crtc_state->vrr.vmin = vmin;
- crtc_state->vrr.vmax = vmax;
-
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
-
if (crtc_state->uapi.vrr_enabled && vmin < vmax)
- intel_vrr_compute_vrr_timings(crtc_state);
+ intel_vrr_compute_vrr_timings(crtc_state, vmin, vmax);
else if (is_cmrr_frac_required(crtc_state) && is_edp)
intel_vrr_compute_cmrr_timings(crtc_state);
else
intel_vrr_compute_fixed_rr_timings(crtc_state);
- /*
- * flipline determines the min vblank length the hardware will
- * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
- * vmin by one to make sure we can get the actual min vblank length.
- */
- crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->hw.adjusted_mode.crtc_vsync_start);
crtc_state->vrr.vsync_end =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_end);
+ crtc_state->hw.adjusted_mode.crtc_vsync_end);
}
}
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
+static int
+intel_vrr_max_hw_guardband(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int max_pipeline_full = REG_FIELD_MAX(VRR_CTL_PIPELINE_FULL_MASK);
+
+ if (DISPLAY_VER(display) >= 13)
+ return REG_FIELD_MAX(XELPD_VRR_CTL_VRR_GUARDBAND_MASK);
+ else
+ return intel_vrr_pipeline_full_to_guardband(crtc_state,
+ max_pipeline_full);
+}
+
+static int
+intel_vrr_max_vblank_guardband(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ return crtc_state->vrr.vmin -
+ adjusted_mode->crtc_vdisplay -
+ crtc_state->set_context_latency -
+ intel_vrr_extra_vblank_delay(display);
+}
+
+static int
+intel_vrr_max_guardband(struct intel_crtc_state *crtc_state)
+{
+ return min(intel_vrr_max_hw_guardband(crtc_state),
+ intel_vrr_max_vblank_guardband(crtc_state));
+}
+
+static
+int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct skl_prefill_ctx prefill_ctx;
+ int prefill_latency_us;
+ int guardband = 0;
+
+ skl_prefill_init_worst(&prefill_ctx, crtc_state);
+
+ /*
+ * The SoC power controller runs SAGV mutually exclusively with package C states,
+ * so the max of the package C and SAGV latencies is used to compute the min prefill guardband.
+ * PM delay = max(sagv_latency, pkgc_max_latency (highest enabled wm level 1 and up))
+ */
+ prefill_latency_us = max(display->sagv.block_time_us,
+ skl_watermark_max_latency(display, 1));
+
+ guardband = skl_prefill_min_guardband(&prefill_ctx,
+ crtc_state,
+ prefill_latency_us);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ guardband = max(guardband, intel_psr_min_guardband(crtc_state));
+ guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
+ }
+
+ return guardband;
+}
+
+static bool intel_vrr_use_optimized_guardband(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * #TODO: Enable optimized guardband for HDMI
+ * For HDMI, a lot of infoframes are transmitted a line or two after vsync.
+ * Since with the optimized guardband the double buffering point is at the
+ * delayed vblank, we need to ensure that vsync happens after the delayed
+ * vblank for the HDMI case.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return false;
+
+ return true;
+}
+
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int guardband;
+
if (!intel_vrr_possible(crtc_state))
return;
- if (DISPLAY_VER(display) >= 13) {
- crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start;
- } else {
- /* hardware imposes one extra scanline somewhere */
- crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
- crtc_state->framestart_delay - 1);
+ if (intel_vrr_use_optimized_guardband(crtc_state))
+ guardband = intel_vrr_compute_optimized_guardband(crtc_state);
+ else
+ guardband = crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+
+ crtc_state->vrr.guardband = min(guardband, intel_vrr_max_guardband(crtc_state));
+ if (intel_vrr_always_use_vrr_tg(display)) {
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vtotal - crtc_state->vrr.guardband;
/*
- * vmin/vmax/flipline also need to be adjusted by
- * the vblank delay to maintain correct vtotals.
+ * pipe_mode has already been derived from the
+ * original adjusted_mode, keep the two in sync.
*/
- crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state);
+ pipe_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vblank_start;
}
+
+ if (DISPLAY_VER(display) < 13)
+ crtc_state->vrr.pipeline_full =
+ intel_vrr_guardband_to_pipeline_full(crtc_state,
+ crtc_state->vrr.guardband);
}
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
@@ -461,6 +525,9 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ if (!HAS_VRR(display))
+ return;
+
/*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
@@ -489,7 +556,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
intel_vrr_set_fixed_rr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ if (!intel_vrr_always_use_vrr_tg(display))
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(crtc_state));
@@ -498,6 +565,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
TRANS_VRR_VSYNC(display, cpu_transcoder),
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
+ /*
+ * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming
+ * double buffering point and transmission line for VRR packets for
+ * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+ * Since we currently support VRR only for DP/eDP, this is programmed
+ * for the Adaptive Sync SDP at vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
}
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -576,126 +655,128 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
return false;
}
-static
-void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /*
- * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming
- * double buffering point and transmission line for VRR packets for
- * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
- * Since currently we support VRR only for DP/eDP, so this is programmed
- * to for Adaptive Sync SDP to Vsync start.
- */
- if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
- intel_de_write(display,
- EMP_AS_SDP_TL(display, cpu_transcoder),
- EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) -
+ intel_vrr_vmin_flipline_offset(display);
}
-void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax);
+}
+
+static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline);
+}
+
+static void intel_vrr_set_vrr_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
- return;
-
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
+ intel_vrr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
+ intel_vrr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_hw_flipline(crtc_state) - 1);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
+ bool cmrr_enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 vrr_ctl;
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
- }
- }
+ vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);
+
+ /*
+ * FIXME this might be broken as bspec seems to imply that
+ * even VRR_CTL_CMRR_ENABLE is armed by TRANS_CMRR_N_HI
+ * when enabling CMRR (but not when disabling CMRR?).
+ */
+ if (cmrr_enable)
+ vrr_ctl |= VRR_CTL_CMRR_ENABLE;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), vrr_ctl);
}
-void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- if (!old_crtc_state->vrr.enable)
- return;
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
- }
+ if (intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000))
+ drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
- intel_vrr_set_fixed_rr_timings(old_crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
-void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!HAS_VRR(display))
+ if (!crtc_state->vrr.enable)
return;
- if (!intel_vrr_possible(crtc_state))
- return;
+ intel_vrr_set_vrr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- return;
- }
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, crtc_state->cmrr.enable);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ if (!old_crtc_state->vrr.enable)
+ return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
}
-void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- if (!HAS_VRR(display))
- return;
if (!intel_vrr_possible(crtc_state))
return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, false);
+}
- intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ if (!intel_vrr_possible(old_crtc_state))
+ return;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
}
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->vrr.flipline &&
crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
- crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+ crtc_state->vrr.flipline == crtc_state->vrr.vmin;
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
@@ -720,14 +801,20 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
TRANS_CMRR_M_HI(display, cpu_transcoder));
}
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
- else
- if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ } else {
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) {
crtc_state->vrr.pipeline_full =
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ crtc_state->vrr.guardband =
+ intel_vrr_pipeline_full_to_guardband(crtc_state,
+ crtc_state->vrr.pipeline_full);
+ }
+ }
+
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
crtc_state->vrr.flipline = intel_de_read(display,
TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
@@ -736,6 +823,15 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ if (DISPLAY_VER(display) < 13) {
+ /* undo what intel_vrr_hw_value() does when writing the values */
+ crtc_state->vrr.flipline += crtc_state->set_context_latency;
+ crtc_state->vrr.vmax += crtc_state->set_context_latency;
+ crtc_state->vrr.vmin += crtc_state->set_context_latency;
+
+ crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display);
+ }
+
/*
* For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
* bits are not filled. Since for these platforms TRAN_VMIN is always
@@ -771,4 +867,34 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
*/
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ /*
+ * For platforms that always use the VRR timing generator, we overwrite
+ * crtc_vblank_start with vtotal - guardband to reflect the delayed
+ * vblank start. This works for both default and optimized guardband values.
+ * On other platforms, we keep the original value from
+ * intel_get_transcoder_timings() and apply adjustments only in VRR-specific
+ * paths as needed.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vblank_start =
+ crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->vrr.guardband;
+}
+
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30)
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay -
+ crtc_state->set_context_latency;
+ else
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay;
+}
+
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_vmin_vblank_start(crtc_state) -
+ crtc_state->set_context_latency;
}
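To recap the pre-Xe_LPD (DISPLAY_VER < 13) value massaging done by the conversion helpers and by intel_vrr_hw_vmin()/intel_vrr_get_config() above, a small standalone round-trip sketch with illustrative numbers:

/* guardband <-> pipeline_full, as in the conversion helpers above. */
static int guardband_to_pipeline_full(int guardband, int framestart_delay)
{
	return guardband - framestart_delay - 1;
}

static int pipeline_full_to_guardband(int pipeline_full, int framestart_delay)
{
	return pipeline_full + framestart_delay + 1;
}

/*
 * vmin as handed to the hardware on DISPLAY_VER < 13: the set context
 * latency and the flipline offset are subtracted before the write and added
 * back on readout, so the software vmin is preserved across a round trip.
 */
static int hw_vmin(int vmin, int scl, int flipline_offset)
{
	return vmin - scl - flipline_offset; /* e.g. 1125 - 1 - 1 = 1123 */
}

static int sw_vmin(int hw, int scl, int flipline_offset)
{
	return hw + scl + flipline_offset;   /* 1123 + 1 + 1 = 1125 again */
}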
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 38bf9996b883..bc9044621635 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -21,7 +21,7 @@ bool intel_vrr_possible(const struct intel_crtc_state *crtc_state);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -35,11 +35,12 @@ int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.c b/drivers/gpu/drm/i915/display/skl_prefill.c
new file mode 100644
index 000000000000..4707c2e7127a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_print.h>
+
+#include "intel_cdclk.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
+#include "skl_watermark.h"
+
+static unsigned int prefill_usecs_to_lines(const struct intel_crtc_state *crtc_state,
+ unsigned int usecs)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, usecs << 16),
+ pipe_mode->crtc_htotal * 1000);
+}
+
+static void prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->prefill.fixed = crtc_state->framestart_delay << 16;
+
+ /* 20 usec for translation walks/etc. */
+ ctx->prefill.fixed += prefill_usecs_to_lines(crtc_state, 20);
+
+ ctx->prefill.dsc = intel_vdsc_prefill_lines(crtc_state);
+}
+
+static void prefill_init_nocdclk_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines_worst(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment_worst(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment_worst(crtc_state);
+}
+
+static void prefill_init_nocdclk(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment(crtc_state);
+}
+
+static unsigned int prefill_adjust(unsigned int value, unsigned int factor)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(value, factor), 0x10000);
+}
+
+static unsigned int prefill_lines_nocdclk(const struct skl_prefill_ctx *ctx)
+{
+ unsigned int prefill = 0;
+
+ prefill += ctx->prefill.dsc;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_2nd);
+
+ prefill += ctx->prefill.scaler_2nd;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_1st);
+
+ prefill += ctx->prefill.scaler_1st;
+ prefill += ctx->prefill.wm0;
+
+ return prefill;
+}
+
+static unsigned int prefill_lines_cdclk(const struct skl_prefill_ctx *ctx)
+{
+ return prefill_adjust(prefill_lines_nocdclk(ctx), ctx->adj.cdclk);
+}
+
+static unsigned int prefill_lines_full(const struct skl_prefill_ctx *ctx)
+{
+ return ctx->prefill.fixed + prefill_lines_cdclk(ctx);
+}
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk_worst(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment_worst(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+static unsigned int prefill_lines_with_latency(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ return ctx->prefill.full + prefill_usecs_to_lines(crtc_state, latency_us);
+}
+
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return DIV_ROUND_UP(prefill, 0x10000);
+}
+
+static unsigned int prefill_guardband(const struct intel_crtc_state *crtc_state)
+{
+ return intel_crtc_vblank_length(crtc_state) << 16;
+}
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int guardband = prefill_guardband(crtc_state);
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return guardband < prefill;
+}
+
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ unsigned int prefill_unadjusted = prefill_lines_nocdclk(ctx);
+ unsigned int prefill_available = prefill_guardband(crtc_state) - ctx->prefill.fixed;
+
+ return intel_cdclk_min_cdclk_for_prefill(crtc_state, prefill_unadjusted,
+ prefill_available);
+}
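
The prefill bookkeeping above is kept in .16 fixed-point scanlines. A stand-alone user-space sketch of the prefill_usecs_to_lines() conversion, using a hypothetical 148.5 MHz pixel clock and 2200-pixel htotal (the kernel uses mul_u32_u32()/DIV_ROUND_UP_ULL() instead of the plain C below):

#include <stdint.h>
#include <stdio.h>

/* mirrors prefill_usecs_to_lines(): microseconds -> .16 scanlines */
static unsigned int usecs_to_lines_q16(unsigned int crtc_clock_khz,
				       unsigned int crtc_htotal,
				       unsigned int usecs)
{
	uint64_t num = (uint64_t)crtc_clock_khz * ((uint64_t)usecs << 16);
	uint64_t den = (uint64_t)crtc_htotal * 1000;

	return (num + den - 1) / den;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* hypothetical mode: 148.5 MHz pixel clock, htotal 2200 */
	unsigned int q16 = usecs_to_lines_q16(148500, 2200, 20);

	printf("20 us = %.2f scanlines (0x%x in .16)\n", q16 / 65536.0, q16);
	return 0;
}

With these numbers the fixed 20 us translation-walk allowance comes out to roughly 1.35 scanlines.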
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.h b/drivers/gpu/drm/i915/display/skl_prefill.h
new file mode 100644
index 000000000000..028ee19b64ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __SKL_PREFILL_H__
+#define __SKL_PREFILL_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+struct skl_prefill_ctx {
+ /* .16 scanlines */
+ struct {
+ unsigned int fixed;
+ unsigned int wm0;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ unsigned int dsc;
+ unsigned int full;
+ } prefill;
+
+ /* .16 adjustment factors */
+ struct {
+ unsigned int cdclk;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ } adj;
+};
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __SKL_PREFILL_H__ */
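
The adj factors in skl_prefill_ctx are likewise .16 multipliers applied by prefill_adjust(). A minimal sketch of that Q16 scaling, with a made-up 1.5x scaler adjustment factor:

#include <stdint.h>
#include <stdio.h>

/* mirrors prefill_adjust(): scale a .16 line count by a .16 factor, rounding up */
static unsigned int q16_adjust(unsigned int lines, unsigned int factor)
{
	return ((uint64_t)lines * factor + 0xffff) >> 16;
}

int main(void)
{
	unsigned int prefill = 4 << 16;		/* 4 lines of downstream prefill */
	unsigned int scaler_adj = 0x18000;	/* hypothetical 1.5x downscale factor */

	printf("adjusted: %.2f lines\n", q16_adjust(prefill, scaler_adj) / 65536.0);
	return 0;
}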
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index c6cccf170ff1..4c4deac7f9c8 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -5,11 +5,13 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
@@ -282,7 +284,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
drm_rect_width(&crtc_state->pipe_src),
drm_rect_height(&crtc_state->pipe_src),
width, height, NULL, 0,
- crtc_state->pch_pfit.enabled);
+ crtc_state->pch_pfit.enabled ||
+ intel_casf_needs_scaler(crtc_state));
}
/**
@@ -321,7 +324,9 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
}
static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
- struct intel_crtc *crtc)
+ struct intel_crtc *crtc,
+ struct intel_plane_state *plane_state,
+ bool casf_scaler)
{
int i;
@@ -329,6 +334,10 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
if (scaler_state->scalers[i].in_use)
continue;
+	/* CASF needs the second scaler */
+ if (!plane_state && casf_scaler && i != 1)
+ continue;
+
scaler_state->scalers[i].in_use = true;
return i;
@@ -379,7 +388,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
- int *scaler_id)
+ int *scaler_id, bool casf_scaler)
{
struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
@@ -388,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int vscale = 0;
if (*scaler_id < 0)
- *scaler_id = intel_allocate_scaler(scaler_state, crtc);
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc, plane_state, casf_scaler);
if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
@@ -520,10 +529,14 @@ static int setup_crtc_scaler(struct intel_atomic_state *state,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ if (intel_casf_needs_scaler(crtc_state) && crtc_state->pch_pfit.enabled)
+ return -EINVAL;
+
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "CRTC", crtc->base.base.id,
- NULL, &scaler_state->scaler_id);
+ NULL, &scaler_state->scaler_id,
+ intel_casf_needs_scaler(crtc_state));
}
static int setup_plane_scaler(struct intel_atomic_state *state,
@@ -558,7 +571,8 @@ static int setup_plane_scaler(struct intel_atomic_state *state,
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "PLANE", plane->base.base.id,
- plane_state, &plane_state->scaler_id);
+ plane_state, &plane_state->scaler_id,
+ false);
}
/**
@@ -738,6 +752,52 @@ static void skl_scaler_setup_filter(struct intel_display *display,
}
}
+#define CASF_SCALER_FILTER_SELECT \
+ (PS_FILTER_PROGRAMMED | \
+ PS_Y_VERT_FILTER_SELECT(0) | \
+ PS_Y_HORZ_FILTER_SELECT(0) | \
+ PS_UV_VERT_FILTER_SELECT(0) | \
+ PS_UV_HORZ_FILTER_SELECT(0))
+
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_rect src, dest;
+ int id, width, height;
+ int x = 0, y = 0;
+ enum pipe pipe = crtc->pipe;
+ u32 ps_ctrl;
+
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+
+ drm_rect_init(&dest, x, y, width, height);
+
+ width = drm_rect_width(&dest);
+ height = drm_rect_height(&dest);
+ id = scaler_state->scaler_id;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
+
+ ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
+ CASF_SCALER_FILTER_SELECT;
+
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
+ PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
+ PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
+}
+
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -921,16 +981,23 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
continue;
id = i;
- crtc_state->pch_pfit.enabled = true;
+
+ /* Read CASF regs for second scaler */
+ if (HAS_CASF(display) && id == 1)
+ intel_casf_sharpness_get_config(crtc_state);
+
+ if (!crtc_state->hw.casf_params.casf_enable)
+ crtc_state->pch_pfit.enabled = true;
pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
+ if (!crtc_state->hw.casf_params.casf_enable)
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
scaler_state->scalers[i].in_use = true;
break;
@@ -968,3 +1035,144 @@ void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
1);
intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
}
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME don't have scalers assigned yet
+ * so can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME don't have scalers assigned yet
+ * so can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 0)
+ return 4 << 16;
+
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 1 && crtc_state->pch_pfit.enabled)
+ return 4 << 16;
+
+ return 0;
+}
+
+static unsigned int _skl_scaler_max_scale(const struct intel_crtc_state *crtc_state,
+ unsigned int max_scale)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * Downscaling requires increasing cdclk, so max scale
+ * factor is limited to the max_dotclock/dotclock ratio.
+ *
+ * FIXME find out the max downscale factors properly
+ */
+ return min(max_scale, DIV_ROUND_UP_ULL((u64)display->cdclk.max_dotclk_freq << 16,
+ crtc_state->hw.pipe_mode.crtc_clock));
+}
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+ if (crtc->num_scalers > 1)
+ max_scale *= 9;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 3 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return 4 << 16;
+ else
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return 4 << 16;
+ else
+ return 0;
+}
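
As the FIXME comments note, the downscale limit is ultimately clamped by the max_dotclock/dotclock ratio in _skl_scaler_max_scale(). A rough stand-alone illustration with hypothetical clock values (not from any real SKU):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values */
	uint64_t max_dotclk_khz = 652800;
	unsigned int crtc_clock_khz = 148500;
	unsigned int hw_max_scale = 9 << 16;	/* single-scaler limit used above */
	unsigned int clk_limit, max_scale;

	/* clamp to the max_dotclock/dotclock ratio, as _skl_scaler_max_scale() does */
	clk_limit = ((max_dotclk_khz << 16) + crtc_clock_khz - 1) / crtc_clock_khz;
	max_scale = hw_max_scale < clk_limit ? hw_max_scale : clk_limit;

	printf("max downscale: %.2fx\n", max_scale / 65536.0);	/* ~4.40x here */
	return 0;
}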
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 12a19016c5f6..7e8d819c019d 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -36,6 +36,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state);
+
enum drm_mode_status
skl_scaler_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode,
@@ -45,4 +47,19 @@ skl_scaler_mode_valid(struct intel_display *display,
void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state);
void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e13fb781e7b2..ba1bf0bd4c55 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -7,15 +7,15 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "pxp/intel_pxp.h"
-#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
@@ -24,6 +24,7 @@
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_step.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
@@ -389,44 +390,19 @@ static int glk_plane_max_width(const struct drm_framebuffer *fb,
}
}
+static int adl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 16 / fb->format->cpp[color_plane];
+}
+
static int icl_plane_min_width(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
{
/* Wa_14011264657, Wa_14011050563: gen11+ */
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- return 18;
- case DRM_FORMAT_RGB565:
- return 10;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- return 6;
- case DRM_FORMAT_NV12:
- return 20;
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return 12;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- return 4;
- default:
- return 1;
- }
+ return 16 / fb->format->cpp[color_plane] + 2;
}
static int xe3_plane_max_width(const struct drm_framebuffer *fb,
@@ -463,6 +439,23 @@ static int skl_plane_max_height(const struct drm_framebuffer *fb,
return 4096;
}
+static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
+{
+ return pipe - PIPE_A + INTEL_FBC_A;
+}
+
+static bool skl_plane_has_fbc(struct intel_display *display,
+ enum intel_fbc_id fbc_id, enum plane_id plane_id)
+{
+ if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
+ return false;
+
+ if (DISPLAY_VER(display) >= 20)
+ return icl_is_hdr_plane(display, plane_id);
+ else
+ return plane_id == PLANE_1;
+}
+
static int icl_plane_max_height(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
@@ -898,6 +891,25 @@ static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
}
+static void x3p_lpd_plane_update_pixel_normalizer(struct intel_dsb *dsb,
+ struct intel_plane *plane,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(plane->pipe);
+ u32 val;
+
+	/* Only HDR planes have a pixel normalizer, and it doesn't matter without FBC */
+ if (!skl_plane_has_fbc(display, fbc_id, plane->id))
+ return;
+
+ val = enable ? PLANE_PIXEL_NORMALIZE_NORM_FACTOR(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0) |
+ PLANE_PIXEL_NORMALIZE_ENABLE : 0;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PIXEL_NORMALIZE(plane->pipe, plane->id), val);
+}
+
static void
icl_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -913,6 +925,10 @@ icl_plane_disable_arm(struct intel_dsb *dsb,
skl_write_plane_wm(dsb, plane, crtc_state);
icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
+
+ if (DISPLAY_VER(display) >= 35)
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, false);
+
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1572,7 +1588,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
}
/* FLAT CCS doesn't need to program AUX_DIST */
- if (!HAS_FLAT_CCS(to_i915(display->drm)) && DISPLAY_VER(display) < 20)
+ if (HAS_AUX_CCS(display))
intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
skl_plane_aux_dist(plane_state, color_plane));
@@ -1643,6 +1659,14 @@ icl_plane_update_arm(struct intel_dsb *dsb,
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
/*
+	 * In order to have FBC for fp16 formats, the pixel normalizer block must be
+	 * active. Check whether the pixel normalizer block needs to be enabled for
+	 * FBC and, if so, enable it with a normalization factor of 1.0.
+ */
+ if (intel_fbc_is_enable_pixel_normalizer(plane_state))
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, true);
+
+ /*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
@@ -1780,8 +1804,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->hw.enable &&
- crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
fb->modifier != DRM_FORMAT_MOD_LINEAR &&
fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(display->drm,
@@ -1884,6 +1907,14 @@ static int intel_plane_min_width(struct intel_plane *plane,
return 1;
}
+static int intel_plane_min_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 1;
+}
+
static int intel_plane_max_width(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2015,6 +2046,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ int min_height = intel_plane_min_height(plane, fb, 0, rotation);
int max_width = intel_plane_max_width(plane, fb, 0, rotation);
int max_height = intel_plane_max_height(plane, fb, 0, rotation);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
@@ -2022,11 +2054,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height || h < 1) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, min_width, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2086,6 +2118,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
int uv_plane = 1;
int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ?
skl_main_to_aux_plane(fb, uv_plane) : 0;
+ int min_width = intel_plane_min_width(plane, fb, uv_plane, rotation);
+ int min_height = intel_plane_min_height(plane, fb, uv_plane, rotation);
int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
int x = plane_state->uapi.src.x1 >> 17;
@@ -2095,11 +2129,11 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
u32 offset;
/* FIXME not quite sure how/if these apply to the chroma plane */
- if (w > max_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] CbCr source size %dx%d too big (limit %dx%d)\n",
+ "[PLANE:%d:%s] requested CbCr source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2404,23 +2438,6 @@ void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state,
}
}
-static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
-{
- return pipe - PIPE_A + INTEL_FBC_A;
-}
-
-static bool skl_plane_has_fbc(struct intel_display *display,
- enum intel_fbc_id fbc_id, enum plane_id plane_id)
-{
- if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
- return false;
-
- if (DISPLAY_VER(display) >= 20)
- return icl_is_hdr_plane(display, plane_id);
- else
- return plane_id == PLANE_1;
-}
-
static struct intel_fbc *skl_plane_fbc(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2439,13 +2456,10 @@ static bool skl_plane_has_planar(struct intel_display *display,
if (display->platform.skylake || display->platform.broxton)
return false;
- if (DISPLAY_VER(display) == 9 && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_1 && plane_id != PLANE_2)
+ if (pipe == PIPE_C)
return false;
- return true;
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static const u32 *skl_get_plane_formats(struct intel_display *display,
@@ -2461,11 +2475,17 @@ static const u32 *skl_get_plane_formats(struct intel_display *display,
}
}
+static bool glk_plane_has_planar(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
+}
+
static const u32 *glk_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (skl_plane_has_planar(display, pipe, plane_id)) {
+ if (glk_plane_has_planar(display, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(glk_planar_formats);
return glk_planar_formats;
} else {
@@ -2705,8 +2725,10 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
static bool skl_plane_has_rc_ccs(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
- return pipe != PIPE_C &&
- (plane_id == PLANE_1 || plane_id == PLANE_2);
+ if (pipe == PIPE_C)
+ return false;
+
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static u8 skl_plane_caps(struct intel_display *display,
@@ -2834,11 +2856,15 @@ skl_universal_plane_create(struct intel_display *display,
intel_fbc_add_plane(skl_plane_fbc(display, pipe, plane_id), plane);
if (DISPLAY_VER(display) >= 30) {
+ plane->min_width = adl_plane_min_width;
plane->max_width = xe3_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(display) >= 11) {
- plane->min_width = icl_plane_min_width;
+ if (DISPLAY_VER(display) >= 14 || display->platform.alderlake_p)
+ plane->min_width = adl_plane_min_width;
+ else
+ plane->min_width = icl_plane_min_width;
if (icl_is_hdr_plane(display, plane_id))
plane->max_width = icl_hdr_plane_max_width;
else
@@ -2930,7 +2956,7 @@ skl_universal_plane_create(struct intel_display *display,
caps = skl_plane_caps(display, pipe, plane_id);
/* FIXME: xe has problems with AUX */
- if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(to_i915(display->drm)))
+ if (!IS_ENABLED(I915) && HAS_AUX_CCS(display))
caps &= ~(INTEL_PLANE_CAP_CCS_RC |
INTEL_PLANE_CAP_CCS_RC_CC |
INTEL_PLANE_CAP_CCS_MC);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index ca9fdfbbe57c..7c944d3ca855 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -455,4 +455,16 @@
_SEL_FETCH_PLANE_OFFSET_5_A, _SEL_FETCH_PLANE_OFFSET_5_B, \
_SEL_FETCH_PLANE_OFFSET_6_A, _SEL_FETCH_PLANE_OFFSET_6_B)
+#define _PLANE_PIXEL_NORMALIZE_1_A 0x701a8
+#define _PLANE_PIXEL_NORMALIZE_2_A 0x702a8
+#define _PLANE_PIXEL_NORMALIZE_1_B 0x711a8
+#define _PLANE_PIXEL_NORMALIZE_2_B 0x712a8
+#define PLANE_PIXEL_NORMALIZE(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_PIXEL_NORMALIZE_1_A, _PLANE_PIXEL_NORMALIZE_1_B, \
+ _PLANE_PIXEL_NORMALIZE_2_A, _PLANE_PIXEL_NORMALIZE_2_B)
+#define PLANE_PIXEL_NORMALIZE_ENABLE REG_BIT(31)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK REG_GENMASK(15, 0)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR(val) REG_FIELD_PREP(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK, (val))
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0 0x3c00
+
#endif /* __SKL_UNIVERSAL_PLANE_REGS_H__ */
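
PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0 (0x3c00) matches the IEEE 754 binary16 encoding of 1.0, which fits the fp16 plane formats the normalizer is used for; reading the field as fp16 is an assumption here, but the encoding itself checks out:

#include <stdio.h>

int main(void)
{
	/* binary16 layout: sign(1) | exponent(5, bias 15) | mantissa(10) */
	unsigned int sign = 0, exponent = 15, mantissa = 0;	/* 1.0 * 2^(15 - 15) */
	unsigned int bits = (sign << 15) | (exponent << 10) | mantissa;

	printf("fp16 1.0 = 0x%04x\n", bits);	/* 0x3c00 */
	return 0;
}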
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index d74cbb43ae6f..6d050408618c 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -10,7 +10,6 @@
#include "soc/intel_dram.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -23,12 +22,16 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
#include "intel_pcode.h"
#include "intel_plane.h"
+#include "intel_vblank.h"
#include "intel_wm.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
@@ -1636,26 +1639,11 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
return ret;
}
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+static int skl_wm_linetime_us(const struct intel_crtc_state *crtc_state,
+ int pixel_rate)
{
- struct intel_display *display = to_intel_display(crtc_state);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(display->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
+ return DIV_ROUND_UP(crtc_state->hw.pipe_mode.crtc_htotal * 1000,
+ pixel_rate);
}
static int
@@ -1743,7 +1731,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
- wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+ wp->linetime_us = skl_wm_linetime_us(crtc_state, plane_pixel_rate);
return 0;
}
@@ -1878,18 +1866,21 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
} else {
blocks++;
}
-
- /*
- * Make sure result blocks for higher latency levels are
- * at least as high as level below the current level.
- * Assumption in DDB algorithm optimization for special
- * cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
}
}
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ *
+ * Let's always do this as the algorithm can give non
+ * monotonic results on any platform.
+ */
+ blocks = max_t(u32, blocks, result_prev->blocks);
+ lines = max_t(u32, lines, result_prev->lines);
+
if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2157,103 +2148,55 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int
-cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state)) {
- drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
- return 1;
- }
-
- return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * intel_cdclk_logical(cdclk_state)));
-}
-
-static int
-dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- u32 dsc_prefill_latency = 0;
-
- if (!crtc_state->dsc.compression_enable ||
- !num_scaler_users ||
- num_scaler_users > crtc->num_scalers)
- return dsc_prefill_latency;
-
- dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
-
- for (int i = 0; i < num_scaler_users; i++) {
- u64 hscale_k, vscale_k;
-
- hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
- vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
- dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
- 1000000);
- }
-
- dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->primary);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ret, pixel_rate, width, level = 0;
+ const struct drm_format_info *info;
+ struct skl_wm_level wm = {};
+ struct skl_wm_params wp;
+ unsigned int latency;
+ u64 modifier;
+ u32 format;
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency);
-}
+ /* only expected to be used for VRR guardband calculation */
+ drm_WARN_ON(display->drm, !HAS_VRR(display));
-static int
-scaler_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int scaler_prefill_latency = 0;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
+ /* FIXME rather ugly to pick this by hand but maybe no better way? */
+ format = DRM_FORMAT_XBGR16161616F;
+ if (HAS_4TILE(display))
+ modifier = I915_FORMAT_MOD_4_TILED;
+ else
+ modifier = I915_FORMAT_MOD_Y_TILED;
- if (!num_scaler_users)
- return scaler_prefill_latency;
+ info = drm_get_format_info(display->drm, format, modifier);
- scaler_prefill_latency = 4 * linetime;
+ pixel_rate = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_total_scale(crtc_state),
+ pipe_mode->crtc_clock),
+ 0x10000);
- if (num_scaler_users > 1) {
- u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
- u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- int latency;
+ /* FIXME limit to max plane width? */
+ width = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_hscale(crtc_state),
+ pipe_mode->crtc_hdisplay),
+ 0x10000);
- latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
- chroma_downscaling_factor), 1000000);
- scaler_prefill_latency += latency;
- }
+ /* FIXME is 90/270 rotation worse than 0/180? */
+ ret = skl_compute_wm_params(crtc_state, width, info,
+ modifier, DRM_MODE_ROTATE_0,
+ pixel_rate, &wp, 0, 1);
+ drm_WARN_ON(display->drm, ret);
- scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ latency = skl_wm_latency(display, level, &wp);
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency);
-}
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
-static bool
-skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
- int wm0_lines, int latency)
-{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ /* FIXME is this sane? */
+ if (wm.min_ddb_alloc == U16_MAX)
+ wm.lines = skl_wm_max_lines(display);
- return crtc_state->framestart_delay +
- intel_usecs_to_scanlines(adjusted_mode, latency) +
- scaler_prefill_latency(crtc_state) +
- dsc_prefill_latency(crtc_state) +
- wm0_lines >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+ return wm.lines << 16;
}
static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
@@ -2272,15 +2215,21 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
return wm0_lines;
}
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ return skl_max_wm0_lines(crtc_state) << 16;
+}
+
/*
* TODO: In case we use PKG_C_LATENCY to allow C-states when the delayed vblank
* size is too small for the package C exit latency we need to notify PSR about
* the scenario to apply Wa_16025596647.
*/
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
- int wm0_lines)
+ const struct skl_prefill_ctx *ctx)
{
struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int level;
for (level = display->wm.num_levels - 1; level >= 0; level--) {
@@ -2295,10 +2244,13 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
if (level == 0)
latency = 0;
- if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ if (!skl_prefill_vblank_too_short(ctx, crtc_state, latency))
return level;
}
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Not enough time in vblank for prefill\n",
+ crtc->base.base.id, crtc->base.name);
+
return -EINVAL;
}
@@ -2306,14 +2258,15 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int wm0_lines, level;
+ struct skl_prefill_ctx ctx;
+ int level;
if (!crtc_state->hw.active)
return 0;
- wm0_lines = skl_max_wm0_lines(crtc_state);
+ skl_prefill_init(&ctx, crtc_state);
- level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ level = skl_max_wm_level_for_vblank(crtc_state, &ctx);
if (level < 0)
return level;
@@ -2323,6 +2276,13 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
*/
crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
+ /*
+ * TODO: assert that we are in fact using the maximum guardband
+	 * if we end up disabling any WM levels here. Otherwise we clearly
+	 * failed to use a realistic worst-case prefill estimate when
+	 * determining the guardband size.
+ */
+
for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
@@ -2341,8 +2301,8 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) >= 12 &&
display->sagv.block_time_us &&
- skl_is_vblank_too_short(crtc_state, wm0_lines,
- display->sagv.block_time_us)) {
+ skl_prefill_vblank_too_short(&ctx, crtc_state,
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -3174,12 +3134,53 @@ void skl_watermark_ipc_init(struct intel_display *display)
skl_watermark_ipc_update(display);
}
-static void
-adjust_wm_latency(struct intel_display *display,
- u16 wm[], int num_levels, int read_latency)
+static void multiply_wm_latency(struct intel_display *display, int mult)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] *= mult;
+}
+
+static void increase_wm_latency(struct intel_display *display, int inc)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ wm[0] += inc;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
+ break;
+
+ wm[level] += inc;
+ }
+}
+
+static bool need_16gb_dimm_wa(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
- int i, level;
+
+ return (display->platform.skylake || display->platform.kabylake ||
+ display->platform.coffeelake || display->platform.cometlake ||
+ DISPLAY_VER(display) == 11) && dram_info->has_16gb_dimms;
+}
+
+static int wm_read_latency(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ return 6;
+ else if (DISPLAY_VER(display) >= 12)
+ return 3;
+ else
+ return 2;
+}
+
+static void sanitize_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
@@ -3187,14 +3188,38 @@ adjust_wm_latency(struct intel_display *display,
* of the punit to satisfy this requirement.
*/
for (level = 1; level < num_levels; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i < num_levels; i++)
- wm[i] = 0;
+ if (wm[level] == 0)
+ break;
+ }
+
+ for (level = level + 1; level < num_levels; level++)
+ wm[level] = 0;
+}
- num_levels = level;
+static void make_wm_latency_monotonic(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
break;
- }
+
+ wm[level] = max(wm[level], wm[level-1]);
}
+}
+
+static void
+adjust_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+
+ if (display->platform.dg2)
+ multiply_wm_latency(display, 2);
+
+ sanitize_wm_latency(display);
+
+ make_wm_latency_monotonic(display);
/*
* WaWmMemoryReadLatency
@@ -3203,24 +3228,22 @@ adjust_wm_latency(struct intel_display *display,
* to add proper adjustment to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
- if (wm[0] == 0) {
- for (level = 0; level < num_levels; level++)
- wm[level] += read_latency;
- }
+ if (wm[0] == 0)
+ increase_wm_latency(display, wm_read_latency(display));
/*
- * WA Level-0 adjustment for 16Gb DIMMs: SKL+
+ * WA Level-0 adjustment for 16Gb+ DIMMs: SKL+
* If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get DIMM info assume 16Gb DIMM
+ * any underrun. If not able to get DIMM info assume 16Gb+ DIMM
* to avoid any underrun.
*/
- if (!display->platform.dg2 && dram_info->has_16gb_dimms)
- wm[0] += 1;
+ if (need_16gb_dimm_wa(display))
+ increase_wm_latency(display, 1);
}
-static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
+ u16 *wm = display->wm.skl_latency;
u32 val;
val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
@@ -3234,15 +3257,11 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
-
- adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
- int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
- int mult = display->platform.dg2 ? 2 : 1;
+ u16 *wm = display->wm.skl_latency;
u32 val;
int ret;
@@ -3254,10 +3273,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
@@ -3267,12 +3286,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
-
- adjust_wm_latency(display, wm, num_levels, read_latency);
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
}
static void skl_setup_wm_latency(struct intel_display *display)
@@ -3283,11 +3300,15 @@ static void skl_setup_wm_latency(struct intel_display *display)
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(display, display->wm.skl_latency);
+ mtl_read_wm_latency(display);
else
- skl_read_wm_latency(display, display->wm.skl_latency);
+ skl_read_wm_latency(display);
+
+ intel_print_wm_latency(display, "original", display->wm.skl_latency);
+
+ adjust_wm_latency(display);
- intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "adjusted", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 62790816f030..6bc2ec9164bf 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -79,5 +79,8 @@ void intel_program_dpkgc_latency(struct intel_atomic_state *state);
bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.c b/drivers/gpu/drm/i915/display/vlv_clock.c
new file mode 100644
index 000000000000..1abdae453514
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "vlv_clock.h"
+#include "vlv_sideband.h"
+
+/*
+ * FIXME: The caching of hpll_freq and czclk_freq relies on the first calls
+ * occurring at a time when they can actually be read. This appears to be the
+ * case, but is somewhat fragile. Make the initialization explicit at a point
+ * where they can be reliably read.
+ */
+
+/* returns HPLL frequency in kHz */
+int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ if (!display->vlv_clock.hpll_freq) {
+ vlv_cck_get(drm);
+ /* Obtain SKU information */
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ vlv_cck_put(drm);
+
+ display->vlv_clock.hpll_freq = vco_freq[hpll_freq] * 1000;
+
+ drm_dbg_kms(drm, "HPLL frequency: %d kHz\n", display->vlv_clock.hpll_freq);
+ }
+
+ return display->vlv_clock.hpll_freq;
+}
+
+static int vlv_clock_get_cck(struct drm_device *drm,
+ const char *name, u32 reg, int ref_freq)
+{
+ u32 val;
+ int divider;
+
+ vlv_cck_get(drm);
+ val = vlv_cck_read(drm, reg);
+ vlv_cck_put(drm);
+
+ divider = val & CCK_FREQUENCY_VALUES;
+
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
+
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
+
+int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
+ return vlv_clock_get_cck(drm, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!display->vlv_clock.czclk_freq) {
+ display->vlv_clock.czclk_freq = vlv_clock_get_cck(drm, "czclk", CCK_CZ_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+ drm_dbg_kms(drm, "CZ clock rate: %d kHz\n", display->vlv_clock.czclk_freq);
+ }
+
+ return display->vlv_clock.czclk_freq;
+}
+
+int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "cdclk", CCK_DISPLAY_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "GPLL ref", CCK_GPLL_CLOCK_CONTROL,
+ vlv_clock_get_czclk(drm));
+}
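
All the CCK-derived clocks above come from the same divider formula, DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1). A worked example with a hypothetical 1.6 GHz HPLL VCO and a divider field of 7:

#include <stdio.h>

int main(void)
{
	/* hypothetical: 1.6 GHz HPLL VCO, CCK divider field of 7 */
	int hpll_vco_khz = 1600 * 1000;
	int divider = 7;
	int freq_khz;

	/* DIV_ROUND_CLOSEST(ref << 1, divider + 1) from vlv_clock_get_cck() */
	freq_khz = (2 * hpll_vco_khz + (divider + 1) / 2) / (divider + 1);

	printf("clock: %d kHz\n", freq_khz);	/* 400000 */
	return 0;
}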
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.h b/drivers/gpu/drm/i915/display/vlv_clock.h
new file mode 100644
index 000000000000..5742ed3c628d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __VLV_CLOCK_H__
+#define __VLV_CLOCK_H__
+
+struct drm_device;
+
+#ifdef I915
+int vlv_clock_get_hpll_vco(struct drm_device *drm);
+int vlv_clock_get_hrawclk(struct drm_device *drm);
+int vlv_clock_get_czclk(struct drm_device *drm);
+int vlv_clock_get_cdclk(struct drm_device *drm);
+int vlv_clock_get_gpll(struct drm_device *drm);
+#else
+static inline int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return 0;
+}
+#endif
+
+#endif /* __VLV_CLOCK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index c9a53fde79c4..444682995658 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -34,7 +34,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -42,6 +41,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 75f5b0e871ef..4542135b20d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -16,12 +16,13 @@
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
-#include "i915_gem_object.h"
#include "i915_gem_mman.h"
+#include "i915_gem_object.h"
+#include "i915_gem_ttm.h"
+#include "i915_jiffies.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 148034ef504d..8878539c10ed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -802,6 +802,7 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr);
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 3380151edfc1..e73b369c3347 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -24,6 +24,11 @@
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
+struct intel_stolen_node {
+ struct drm_i915_private *i915;
+ struct drm_mm_node node;
+};
+
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -36,9 +41,9 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start, u64 end)
+static int __i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
{
int ret;
@@ -58,24 +63,43 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment)
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
+{
+ return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
+ size, alignment,
+ start, end);
+}
+
+static int __i915_gem_stolen_insert_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment)
{
- return i915_gem_stolen_insert_node_in_range(i915, node,
- size, alignment,
- I915_GEM_STOLEN_BIAS,
- U64_MAX);
+ return __i915_gem_stolen_insert_node_in_range(i915, node,
+ size, alignment,
+ I915_GEM_STOLEN_BIAS,
+ U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node)
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment)
+{
+ return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
+}
+
+static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node)
{
mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&i915->mm.stolen_lock);
}
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ __i915_gem_stolen_remove_node(node->i915, &node->node);
+}
+
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
@@ -683,7 +707,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
i915_gem_object_release_memory_region(obj);
@@ -772,8 +796,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
} else {
- ret = i915_gem_stolen_insert_node(i915, stolen, size,
- mem->min_page_size);
+ ret = __i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
}
if (ret)
goto err_free;
@@ -785,7 +809,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
return 0;
err_remove:
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return ret;
@@ -1000,38 +1024,64 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
return obj->ops == &i915_gem_object_stolen_ops;
}
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+bool i915_gem_stolen_initialized(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return drm_mm_initialized(&i915->mm.stolen);
}
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return i915->dsm.stolen.start;
}
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return resource_size(&i915->dsm.stolen);
}
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
{
+ struct drm_i915_private *i915 = node->i915;
+
return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return drm_mm_node_allocated(&node->node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node)
{
- return drm_mm_node_allocated(node);
+ return node->node.start;
}
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
{
- return node->start;
+ return node->node.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->i915 = i915;
+
+ return node;
}
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
{
- return node->size;
+ kfree(node);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index dfe0db8bb1b9..7b0386002ed4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -8,21 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
-struct drm_mm_node;
+struct drm_device;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_stolen_node;
-#define i915_stolen_fb drm_mm_node
-
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node);
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
@@ -38,15 +34,17 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
#define I915_GEM_STOLEN_BIAS SZ_128K
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node);
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node);
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif /* __I915_GEM_STOLEN_H__ */
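The stolen-memory interface above now hands callers an opaque struct intel_stolen_node bound to a device, instead of a raw drm_mm_node plus drm_i915_private pointer. A minimal usage sketch follows, based only on the prototypes in this hunk; the size and alignment values are placeholders and error handling is abbreviated.

#include <linux/sizes.h>
#include "gem/i915_gem_stolen.h"

static int example_reserve_stolen(struct drm_device *drm,
				  struct intel_stolen_node **out)
{
	struct intel_stolen_node *node;
	int ret;

	node = i915_gem_stolen_node_alloc(drm);	/* binds the node to this device */
	if (!node)
		return -ENOMEM;

	/* Size and alignment below are illustrative only. */
	ret = i915_gem_stolen_insert_node(node, SZ_1M, SZ_4K);
	if (ret) {
		i915_gem_stolen_node_free(node);
		return ret;
	}

	/* The caller can query the reservation without touching drm_mm. */
	drm_dbg(drm, "stolen node at offset %llu, size %llu\n",
		i915_gem_stolen_node_offset(node),
		i915_gem_stolen_node_size(node));

	*out = node;
	return 0;
}

static void example_release_stolen(struct intel_stolen_node *node)
{
	if (i915_gem_stolen_node_allocated(node))
		i915_gem_stolen_remove_node(node);
	i915_gem_stolen_node_free(node);
}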
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5a296ba3758a..567b97d28d30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -145,8 +145,9 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (GRAPHICS_VER(i915) == 2 ||
- (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+ if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))
+ tile_width = 128;
+ else if (GRAPHICS_VER(i915) == 2)
tile_width = 128;
else
tile_width = 512;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 57bb111d65da..d119e54e5be1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -10,6 +10,7 @@
#include <drm/drm_buddy.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 54829801d3f7..2893df65c359 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -138,6 +138,13 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+
+ i915_gem_fence_wait_priority(fence, &attr);
+}
+
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
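i915_gem_fence_wait_priority_display() wraps the existing priority bump with I915_PRIORITY_DISPLAY so display code no longer needs the scheduler attribute types. A hedged sketch of a caller; the helper name and surrounding flow here are illustrative, not taken from the display code.

#include <linux/dma-fence.h>
#include "gem/i915_gem_object.h"

/* Hypothetical helper: boost whatever fence gates the next flip. */
static void example_boost_flip_fence(struct dma_fence *fence)
{
	if (!fence || dma_fence_is_signaled(fence))
		return;

	/* Re-prioritise the request behind the fence for display. */
	i915_gem_fence_wait_priority_display(fence);
}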
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 8116fd5987e2..8c01fb6d4e7b 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -292,15 +292,15 @@ int gen4_emit_bb_start(struct i915_request *rq,
void gen2_irq_enable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
void gen2_irq_disable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
}
void gen5_irq_enable(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 8d4bb95f8424..dcd3a3047aa4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_request.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index bf38cc5fe872..932f9f1b06b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -6,6 +6,7 @@
#include <linux/pm_runtime.h>
#include <linux/string_helpers.h>
+#include "display/vlv_clock.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -802,7 +803,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
mul = 1000000;
- div = i915->czclk_freq;
+ div = vlv_clock_get_czclk(&i915->drm);
overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(uncore, reg);
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 4da94098bd3e..b01c837ab646 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,8 +7,8 @@
#include <drm/intel/i915_drm.h>
-#include "display/intel_display.h"
#include "display/intel_display_rps.h"
+#include "display/vlv_clock.h"
#include "soc/intel_dram.h"
#include "i915_drv.h"
@@ -1690,10 +1690,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->gpll_ref_freq =
- vlv_get_cck_clock(&i915->drm, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- i915->czclk_freq);
+ rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm);
drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
rps->gpll_ref_freq);
@@ -1703,13 +1700,13 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1737,13 +1734,13 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1780,6 +1777,7 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
@@ -1796,7 +1794,7 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
time = ktime_us_delta(now.ktime, prev->ktime);
- time *= rps_to_i915(rps)->czclk_freq;
+ time *= vlv_clock_get_czclk(&i915->drm);
/* Workload can be split between render + media,
* e.g. SwapBuffers being blitted in X after being rendered in
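With the cached czclk_freq field gone, VLV/CHV code asks the display-side vlv_clock helpers for the CZ clock on demand. A sketch of the residency conversion this enables, assuming the helper returns the frequency in kHz as the old field did; the wrapper function itself is illustrative.

#include <linux/math64.h>
#include "display/vlv_clock.h"

/*
 * Illustrative only: convert a raw VLV/CHV residency count (CZ clock
 * cycles) to nanoseconds, mirroring intel_rc6_residency_ns() above:
 * counts / (czclk_khz * 1000 Hz) seconds == counts * 1e6 / czclk_khz ns.
 */
static u64 example_cz_counts_to_ns(struct drm_device *drm, u64 counts)
{
	unsigned int czclk_khz = vlv_clock_get_czclk(drm);

	if (!czclk_khz)
		return 0;

	return div_u64(counts * 1000000, czclk_khz);
}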
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 7d486dfa2fc1..ece88c612e27 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -2923,7 +2924,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(&engine->wa_list);
}
-static const struct i915_range mcr_ranges_gen8[] = {
+static const struct i915_mmio_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
@@ -2932,7 +2933,7 @@ static const struct i915_range mcr_ranges_gen8[] = {
{},
};
-static const struct i915_range mcr_ranges_gen12[] = {
+static const struct i915_mmio_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
@@ -2941,7 +2942,7 @@ static const struct i915_range mcr_ranges_gen12[] = {
{},
};
-static const struct i915_range mcr_ranges_xehp[] = {
+static const struct i915_mmio_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
@@ -2960,7 +2961,7 @@ static const struct i915_range mcr_ranges_xehp[] = {
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
- const struct i915_range *mcr_ranges;
+ const struct i915_mmio_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 0454eb1814bb..376f201a7650 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -11,6 +11,7 @@
#include "gt/intel_reset.h"
#include "gt/selftest_engine_heartbeat.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 69830a5c49d3..c43b47687838 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1279,20 +1279,15 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
}
if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
- switch (cap_type_id) {
- case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = -EINVAL;
+ if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP)
ret = vfio_info_add_capability(&caps,
&sparse->header,
struct_size(sparse, areas,
sparse->nr_areas));
- if (ret) {
- kfree(sparse);
- return ret;
- }
- break;
- default:
+ if (ret) {
kfree(sparse);
- return -EINVAL;
+ return ret;
}
}
@@ -1361,21 +1356,27 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
+ if (!is_power_of_2(hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) ||
+ !is_power_of_2(hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+ if (!hdr.count)
+ return -EINVAL;
+
ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size);
if (ret) {
- gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
- return -EINVAL;
- }
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n");
+ return ret;
}
+
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
}
ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index da1135fa7cda..db5cd65100fe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -49,7 +49,7 @@
* @gpa: guest physical address
*
* Returns:
- * Zero on success, negative error code if failed
+ * The MMIO offset of the given GPA
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
@@ -58,7 +58,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
}
#define reg_is_mmio(gvt, reg) \
- (reg >= 0 && reg < gvt->device_info.mmio_size)
+ (reg < gvt->device_info.mmio_size)
#define reg_is_gtt(gvt, reg) \
(reg >= gvt->device_info.gtt_start_offset \
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index 24e5bb8a670e..3cb615ffa96d 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include "i915_config.h"
-#include "i915_utils.h"
+#include "i915_jiffies.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 89be8da79d3b..c97b76771917 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -46,6 +46,8 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include "display/i9xx_display_sr.h"
#include "display/intel_bw.h"
@@ -737,6 +739,18 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
"DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &i915_display_rpm_interface,
+};
+
+const struct intel_display_parent_interface *i915_driver_parent_interface(void)
+{
+ return &parent;
+}
+
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct drm_i915_private, drm, display);
+
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -758,7 +772,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return ERR_CAST(display);
@@ -1053,7 +1067,6 @@ static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1067,8 +1080,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_disable_user_access(display);
}
- pci_save_state(pdev);
-
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
@@ -1103,7 +1114,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
int ret, i;
@@ -1124,11 +1134,21 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_display_power_resume_early(display);
-
- goto out;
}
- pci_disable_device(pdev);
+ enable_rpm_wakeref_asserts(rpm);
+
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
+
+ return ret;
+}
+
+static int i915_drm_suspend_noirq(struct drm_device *dev, bool hibernation)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
@@ -1140,21 +1160,20 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
* Acer Aspire 1830T
+ *
+ * pci_save_state() prevents drivers/pci from
+ * automagically putting the device into D3.
*/
- if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
- pci_set_power_state(pdev, PCI_D3hot);
-
-out:
- enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake_count)
- intel_runtime_pm_driver_release(rpm);
+ if (hibernation && GRAPHICS_VER(dev_priv) < 6)
+ pci_save_state(pdev);
- return ret;
+ return 0;
}
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
pm_message_t state)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int error;
if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
@@ -1168,7 +1187,14 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
if (error)
return error;
- return i915_drm_suspend_late(&i915->drm, false);
+ error = i915_drm_suspend_late(&i915->drm, false);
+ if (error)
+ return error;
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
static int i915_drm_resume(struct drm_device *dev)
@@ -1260,7 +1286,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1274,41 +1299,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
* similar so that power domains can be employed.
*/
- /*
- * Note that we need to set the power state explicitly, since we
- * powered off the device during freeze and the PCI core won't power
- * it back up for us during thaw. Powering off the device during
- * freeze is not a hard requirement though, and during the
- * suspend/resume phases the PCI core makes sure we get here with the
- * device powered on. So in case we change our freeze logic and keep
- * the device powered we can also remove the following set power state
- * call.
- */
- ret = pci_set_power_state(pdev, PCI_D0);
- if (ret) {
- drm_err(&dev_priv->drm,
- "failed to set PCI D0 power state (%d)\n", ret);
- return ret;
- }
-
- /*
- * Note that pci_enable_device() first enables any parent bridge
- * device and only then sets the power state for this device. The
- * bridge enabling is a nop though, since bridge devices are resumed
- * first. The order of enabling power and enabling the device is
- * imposed by the PCI core as described above, so here we preserve the
- * same order for the freeze/thaw phases.
- *
- * TODO: eventually we should remove pci_disable_device() /
- * pci_enable_enable_device() from suspend/resume. Due to how they
- * depend on the device enable refcount we can't anyway depend on them
- * disabling/enabling the device.
- */
- if (pci_enable_device(pdev))
- return -EIO;
-
- pci_set_master(pdev);
-
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1328,11 +1318,18 @@ static int i915_drm_resume_early(struct drm_device *dev)
int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int ret;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret)
+ return ret;
+
+ pci_restore_state(pdev);
+
ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
@@ -1389,6 +1386,16 @@ static int i915_pm_suspend_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, false);
}
+static int i915_pm_suspend_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, false);
+}
+
static int i915_pm_poweroff_late(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1399,6 +1406,16 @@ static int i915_pm_poweroff_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, true);
}
+static int i915_pm_poweroff_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, true);
+}
+
static int i915_pm_resume_early(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1664,24 +1681,25 @@ const struct dev_pm_ops i915_pm_ops = {
.prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
+ .suspend_noirq = i915_pm_suspend_noirq,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.complete = i915_pm_complete,
/*
* S4 event handlers
- * @freeze, @freeze_late : called (1) before creating the
- * hibernation image [PMSG_FREEZE] and
- * (2) after rebooting, before restoring
- * the image [PMSG_QUIESCE]
- * @thaw, @thaw_early : called (1) after creating the hibernation
- * image, before writing it [PMSG_THAW]
- * and (2) after failing to create or
- * restore the image [PMSG_RECOVER]
- * @poweroff, @poweroff_late: called after writing the hibernation
- * image, before rebooting [PMSG_HIBERNATE]
- * @restore, @restore_early : called after rebooting and restoring the
- * hibernation image [PMSG_RESTORE]
+ * @freeze* : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw* : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff* : called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore* : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_freeze,
.freeze_late = i915_pm_freeze_late,
@@ -1689,6 +1707,7 @@ const struct dev_pm_ops i915_pm_ops = {
.thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
+ .poweroff_noirq = i915_pm_poweroff_noirq,
.restore_early = i915_pm_restore_early,
.restore = i915_pm_restore,
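The PCI power handling moves out of the late phase: the driver no longer calls pci_disable_device()/pci_set_power_state() itself on the system sleep path and instead lets the PCI core do it after the new noirq callbacks. The block below is a comment-only summary of the resulting ordering, paraphrased from the hunks above rather than copied from the driver.

/*
 * Rough summary of the system suspend flow after this change (PM core path);
 * phase names follow dev_pm_ops:
 *
 *   suspend        -> i915_drm_suspend()        display off, IRQs suspended
 *   suspend_late   -> i915_drm_suspend_late()   GT/power domains, no PCI calls
 *   suspend_noirq  -> i915_drm_suspend_noirq()  pci_save_state() only for the
 *                                               pre-gen6 hibernation quirk, so
 *                                               the PCI core leaves that case
 *                                               out of D3
 *   (PCI core)                                  saves state and picks D-state
 *
 * The vga_switcheroo path is not driven by the PM core, so
 * i915_driver_suspend_switcheroo() calls pci_save_state() and
 * pci_set_power_state(PCI_D3hot) itself, mirrored by pci_set_power_state(D0)
 * and pci_restore_state() in i915_driver_resume_switcheroo().
 */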
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 1e95ecb2a163..9551519ab429 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -12,6 +12,7 @@ struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_parent_interface;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
@@ -24,6 +25,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+const struct intel_display_parent_interface *i915_driver_parent_interface(void);
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6a768aad8edd..95f9ddf22ce4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -174,6 +174,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* display device data, must be placed after drm device member */
struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
@@ -234,14 +235,11 @@ struct drm_i915_private {
/* Sideband mailbox protection */
struct mutex sb_lock;
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask;
+	/* Cached value of the gen2-4 IMR to avoid reads when updating the bitfield */
+ u32 gen2_imr_mask;
bool preserve_bios_swizzle;
- unsigned int hpll_freq;
- unsigned int czclk_freq;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -490,16 +488,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
-/*
- * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE,
- * so we need to define these even on platforms that the i915 base driver
- * doesn't support. Ensure the parameter is used in the definition to
- * avoid 'unused variable' warnings when compiling the shared display code
- * for i915.
- */
-#define IS_LUNARLAKE(i915) (0 && i915)
-#define IS_BATTLEMAGE(i915) (0 && i915)
-#define IS_PANTHERLAKE(i915) (0 && i915)
#define IS_ARROWLAKE_H(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
@@ -604,8 +592,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
- !(IS_I915G(i915) || IS_I915GM(i915)))
+#define HAS_128_BYTE_Y_TILING(i915) (!IS_I915G(i915) && !IS_I915GM(i915))
#define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6)
#define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8d5da222a187..e0a0bd687f1b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,12 +33,10 @@
#include <drm/drm_drv.h>
-#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
-#include "display/intel_psr_regs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
@@ -415,7 +413,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
struct drm_i915_private *i915 = arg;
struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
- u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ u32 gt_iir, de_ier = 0, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
if (unlikely(!intel_irqs_enabled(i915)))
@@ -424,19 +422,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /* disable master interrupt before clearing iir */
- de_ier = raw_reg_read(regs, DEIER);
- raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- if (!HAS_PCH_NOP(display)) {
- sde_ier = raw_reg_read(regs, SDEIER);
- raw_reg_write(regs, SDEIER, 0);
- }
+ /* Disable master and south interrupts */
+ ilk_display_irq_master_disable(display, &de_ier, &sde_ier);
/* Find, clear, then process each source of interrupt */
@@ -450,15 +437,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
- de_iir = raw_reg_read(regs, DEIIR);
- if (de_iir) {
- raw_reg_write(regs, DEIIR, de_iir);
- if (DISPLAY_VER(display) >= 7)
- ivb_display_irq_handler(display, de_iir);
- else
- ilk_display_irq_handler(display, de_iir);
+ if (ilk_display_irq_handler(display))
ret = IRQ_HANDLED;
- }
if (GRAPHICS_VER(i915) >= 6) {
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
@@ -469,9 +449,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
}
}
- raw_reg_write(regs, DEIER, de_ier);
- if (sde_ier)
- raw_reg_write(regs, SDEIER, sde_ier);
+ /* Re-enable master and south interrupts */
+ ilk_display_irq_master_enable(display, de_ier, sde_ier);
pmu_irq_stats(i915, ret);
@@ -656,22 +635,10 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_display *display = dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- gen2_irq_reset(uncore, DE_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
-
- if (GRAPHICS_VER(dev_priv) == 7)
- intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
-
- if (IS_HASWELL(dev_priv)) {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
- }
+ /* The master interrupt enable is in DEIER, reset display irq first */
+ ilk_display_irq_reset(display);
gen5_gt_irq_reset(to_gt(dev_priv));
-
- ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
@@ -826,9 +793,10 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
+#define I9XX_HAS_FBC(i915) (IS_I85X(i915) || IS_I865G(i915) || IS_I915GM(i915) || IS_I945GM(i915))
+
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
- struct intel_display *display = i915->display;
/*
* On gen2/3 FBC generates (seemingly spurious)
* display INVALID_GTT/INVALID_GTT_PTE table errors.
@@ -841,7 +809,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* Unfortunately we can't mask off individual PGTBL_ER bits,
* so we just have to mask off all page table errors via EMR.
*/
- if (HAS_FBC(display))
+ if (I9XX_HAS_FBC(i915))
return I915_ERROR_MEMORY_REFRESH;
else
return I915_ERROR_PAGE_TABLE |
@@ -897,7 +865,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -908,28 +876,14 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
- enable_mask =
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
-
- if (DISPLAY_VER(display) >= 3) {
- dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
- enable_mask |= I915_ASLE_INTERRUPT;
- }
+ dev_priv->gen2_imr_mask = ~enable_mask;
- if (HAS_HOTPLUG(display)) {
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- }
+ enable_mask |= I915_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i915_display_irq_postinstall(display);
}
@@ -958,8 +912,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (HAS_HOTPLUG(display) &&
- iir & I915_DISPLAY_PORT_INTERRUPT)
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
@@ -999,7 +952,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static u32 i965_error_mask(struct drm_i915_private *i915)
@@ -1029,25 +982,17 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
-
- enable_mask =
- I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
+
+ dev_priv->gen2_imr_mask = ~enable_mask;
+
+ enable_mask |= I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i965_display_irq_postinstall(display);
}
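With irq_mask renamed to gen2_imr_mask, both the i915 and i965 postinstall paths derive IMR and IER from the display-provided enable mask. The fragment below restates that relationship with extra comments; "display", "uncore" and the wrapper function are stand-ins, not driver code.

static void example_postinstall_masks(struct intel_display *display,
				      struct intel_uncore *uncore,
				      struct drm_i915_private *i915)
{
	u32 enable_mask;

	enable_mask = i9xx_display_irq_enable_mask(display) |
		      I915_MASTER_ERROR_INTERRUPT;

	/* Everything in enable_mask is also unmasked in IMR. */
	i915->gen2_imr_mask = ~enable_mask;

	/*
	 * The GT user interrupt goes into IER only; it stays masked in IMR
	 * until gen2_irq_enable() clears it in gen2_imr_mask on demand.
	 */
	enable_mask |= I915_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, i915->gen2_imr_mask, enable_mask);
}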
diff --git a/drivers/gpu/drm/i915/i915_jiffies.h b/drivers/gpu/drm/i915/i915_jiffies.h
new file mode 100644
index 000000000000..18a4eaea897a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_jiffies.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_JIFFIES_H__
+#define __I915_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+#endif /* __I915_JIFFIES_H__ */
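msecs_to_jiffies_timeout() adds one guard jiffy on top of the rounded-up conversion so a timeout that starts just before a tick boundary still waits at least the requested time, clamped to MAX_JIFFY_OFFSET. A quick usage sketch, assuming CONFIG_HZ=1000 (one jiffy per millisecond); the wait queue and flag are illustrative.

#include <linux/wait.h>
#include "i915_jiffies.h"

/*
 * With HZ=1000: msecs_to_jiffies(10) == 10 jiffies; the helper returns 11,
 * guaranteeing at least 10 ms of real time before the wait can expire.
 */
static long example_wait(wait_queue_head_t *wq, bool *done)
{
	return wait_event_timeout(*wq, READ_ONCE(*done),
				  msecs_to_jiffies_timeout(10));
}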
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.c b/drivers/gpu/drm/i915/i915_mmio_range.c
new file mode 100644
index 000000000000..724041e81aa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "i915_mmio_range.h"
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table)
+{
+ while (table->start || table->end) {
+ if (addr >= table->start && addr <= table->end)
+ return true;
+
+ table++;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.h b/drivers/gpu/drm/i915/i915_mmio_range.h
new file mode 100644
index 000000000000..f1c7086d3e3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __I915_MMIO_RANGE_H__
+#define __I915_MMIO_RANGE_H__
+
+#include <linux/types.h>
+
+/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
+struct i915_mmio_range {
+ u32 start;
+ u32 end;
+};
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table);
+
+#endif /* __I915_MMIO_RANGE_H__ */
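The shared range helper expects a sentinel-terminated table ({ 0, 0 } ends the walk) and treats both bounds as inclusive. A small sketch; the offsets below are illustrative, not real hardware ranges.

#include "i915_mmio_range.h"

/* Illustrative table only. */
static const struct i915_mmio_range example_ranges[] = {
	{ .start = 0x2000, .end = 0x20ff },
	{ .start = 0x9400, .end = 0x97ff },
	{}, /* sentinel: start and end both zero terminate the walk */
};

static bool example_reg_is_listed(u32 offset)
{
	return i915_mmio_range_table_contains(offset, example_ranges);
}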
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1658f1246c6f..0b9d9f3f7813 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -219,6 +219,7 @@
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -4320,29 +4321,17 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
return false;
}
-static bool reg_in_range_table(u32 addr, const struct i915_range *table)
-{
- while (table->start || table->end) {
- if (addr >= table->start && addr <= table->end)
- return true;
-
- table++;
- }
-
- return false;
-}
-
#define REG_EQUAL(addr, mmio) \
((addr) == i915_mmio_reg_offset(mmio))
-static const struct i915_range gen7_oa_b_counters[] = {
+static const struct i915_mmio_range gen7_oa_b_counters[] = {
{ .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
{ .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
{ .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
{}
};
-static const struct i915_range gen12_oa_b_counters[] = {
+static const struct i915_mmio_range gen12_oa_b_counters[] = {
{ .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
{ .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
{ .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
@@ -4353,7 +4342,7 @@ static const struct i915_range gen12_oa_b_counters[] = {
{}
};
-static const struct i915_range mtl_oam_b_counters[] = {
+static const struct i915_mmio_range mtl_oam_b_counters[] = {
{ .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */
{ .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */
{ .start = 0x393040, .end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */
@@ -4361,43 +4350,43 @@ static const struct i915_range mtl_oam_b_counters[] = {
{}
};
-static const struct i915_range xehp_oa_b_counters[] = {
+static const struct i915_mmio_range xehp_oa_b_counters[] = {
{ .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
{ .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
{}
};
-static const struct i915_range gen7_oa_mux_regs[] = {
+static const struct i915_mmio_range gen7_oa_mux_regs[] = {
{ .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
{ .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
{ .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
{}
};
-static const struct i915_range hsw_oa_mux_regs[] = {
+static const struct i915_mmio_range hsw_oa_mux_regs[] = {
{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
{ .start = 0x25100, .end = 0x2ff90 },
{}
};
-static const struct i915_range chv_oa_mux_regs[] = {
+static const struct i915_mmio_range chv_oa_mux_regs[] = {
{ .start = 0x182300, .end = 0x1823a4 },
{}
};
-static const struct i915_range gen8_oa_mux_regs[] = {
+static const struct i915_mmio_range gen8_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
{ .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
{}
};
-static const struct i915_range gen11_oa_mux_regs[] = {
+static const struct i915_mmio_range gen11_oa_mux_regs[] = {
{ .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
{}
};
-static const struct i915_range gen12_oa_mux_regs[] = {
+static const struct i915_mmio_range gen12_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4410,7 +4399,7 @@ static const struct i915_range gen12_oa_mux_regs[] = {
* Ref: 14010536224:
* 0x20cc is repurposed on MTL, so use a separate array for MTL.
*/
-static const struct i915_range mtl_oa_mux_regs[] = {
+static const struct i915_mmio_range mtl_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4421,61 +4410,61 @@ static const struct i915_range mtl_oa_mux_regs[] = {
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen7_oa_b_counters);
}
static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs);
}
static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs) ||
- reg_in_range_table(addr, gen11_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen11_oa_mux_regs);
}
static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, hsw_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, hsw_oa_mux_regs);
}
static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, chv_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, chv_oa_mux_regs);
}
static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen12_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen12_oa_b_counters);
}
static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
{
if (HAS_OAM(perf->i915) &&
GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oam_b_counters);
+ return i915_mmio_range_table_contains(addr, mtl_oam_b_counters);
return false;
}
static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, xehp_oa_b_counters) ||
- reg_in_range_table(addr, gen12_oa_b_counters) ||
+ return i915_mmio_range_table_contains(addr, xehp_oa_b_counters) ||
+ i915_mmio_range_table_contains(addr, gen12_oa_b_counters) ||
mtl_is_valid_oam_b_counter_addr(perf, addr);
}
static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, mtl_oa_mux_regs);
else
- return reg_in_range_table(addr, gen12_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen12_oa_mux_regs);
}
static u32 mask_reg_value(u32 reg, u32 val)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 5bc696bfbb0f..d8f69bba79a9 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -895,7 +895,7 @@ static ssize_t i915_pmu_format_show(struct device *dev,
struct i915_str_attribute *eattr;
eattr = container_of(attr, struct i915_str_attribute, attr);
- return sprintf(buf, "%s\n", eattr->str);
+ return sysfs_emit(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
@@ -925,7 +925,7 @@ static ssize_t i915_pmu_event_show(struct device *dev,
struct i915_ext_attribute *eattr;
eattr = container_of(attr, struct i915_ext_attribute, attr);
- return sprintf(buf, "config=0x%lx\n", eattr->val);
+ return sysfs_emit(buf, "config=0x%lx\n", eattr->val);
}
#define __event(__counter, __name, __unit) \
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index bfe98cb9a038..e81fac8ab51b 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -174,6 +174,16 @@
*/
#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
typedef struct {
u32 reg;
} i915_reg_t;
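REG_FIELD_MAX() pairs naturally with REG_FIELD_PREP() for clamping values to a register field. A worked example with an assumed mask; EXAMPLE_STRENGTH_MASK is illustrative, not a real register definition.

/* Illustrative mask only, not a real register field. */
#define EXAMPLE_STRENGTH_MASK	REG_GENMASK(7, 4)

/*
 * REG_FIELD_MAX(EXAMPLE_STRENGTH_MASK) == 0xf: the largest value that fits
 * in a 4-bit field, regardless of the field's position in the register.
 */
static u32 example_pack_strength(u32 strength)
{
	strength = min(strength, REG_FIELD_MAX(EXAMPLE_STRENGTH_MASK));

	return REG_FIELD_PREP(EXAMPLE_STRENGTH_MASK, strength);
}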
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index a0c892e4c40d..4f75115b87d6 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -38,8 +38,10 @@
struct drm_i915_private;
+#ifndef MISSING_CASE
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+#endif
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
@@ -65,11 +67,13 @@ bool i915_error_injected(void);
drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
+#ifndef fetch_and_zero
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
__T; \
})
+#endif
/*
* check_user_mbz: Check that a user value exists and is zero
@@ -100,43 +104,6 @@ static inline bool is_power_of_2_u64(u64 n)
return (n != 0 && ((n & (n - 1)) == 0));
}
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
- unsigned long j = msecs_to_jiffies(m);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
- unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
- /*
- * Don't re-read the value of "jiffies" every time since it may change
- * behind our back and break the math.
- */
- tmp_jiffies = jiffies;
- target_jiffies = timestamp_jiffies +
- msecs_to_jiffies_timeout(to_wait_ms);
-
- if (time_after(target_jiffies, tmp_jiffies)) {
- remaining_jiffies = target_jiffies - tmp_jiffies;
- while (remaining_jiffies)
- remaining_jiffies =
- schedule_timeout_uninterruptible(remaining_jiffies);
- }
-}
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7ce3e6de0c19..d11c2814b787 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -177,6 +178,82 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
return track_intel_runtime_pm_wakeref(rpm);
}
+static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return &i915->runtime_pm;
+}
+
+static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_raw(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_if_in_use(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_noresume(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_unchecked(const struct drm_device *drm)
+{
+ intel_runtime_pm_put_unchecked(drm_to_rpm(drm));
+}
+
+static bool i915_display_rpm_suspended(const struct drm_device *drm)
+{
+ return intel_runtime_pm_suspended(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_held(const struct drm_device *drm)
+{
+ assert_rpm_wakelock_held(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_block(const struct drm_device *drm)
+{
+ disable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_unblock(const struct drm_device *drm)
+{
+ enable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+const struct intel_display_rpm_interface i915_display_rpm_interface = {
+ .get = i915_display_rpm_get,
+ .get_raw = i915_display_rpm_get_raw,
+ .get_if_in_use = i915_display_rpm_get_if_in_use,
+ .get_noresume = i915_display_rpm_get_noresume,
+ .put = i915_display_rpm_put,
+ .put_raw = i915_display_rpm_put_raw,
+ .put_unchecked = i915_display_rpm_put_unchecked,
+ .suspended = i915_display_rpm_suspended,
+ .assert_held = i915_display_rpm_assert_held,
+ .assert_block = i915_display_rpm_assert_block,
+ .assert_unblock = i915_display_rpm_assert_unblock
+};
+
/**
* intel_runtime_pm_get_raw - grab a raw runtime pm reference
* @rpm: the intel_runtime_pm structure
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 7428bd8fa67f..ed6c43b17f9a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -14,6 +14,7 @@
struct device;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_rpm_interface;
/*
* This struct helps tracking the state needed for runtime PM, which puts the
@@ -226,4 +227,6 @@ static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
}
#endif
+extern const struct intel_display_rpm_interface i915_display_rpm_interface;
+
#endif /* __INTEL_RUNTIME_PM_H__ */
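Display code now reaches runtime PM through the intel_display_rpm_interface function pointers exported above, rather than calling intel_runtime_pm_*() directly. A minimal consumer-side sketch; how the display core actually stores and retrieves the interface pointer is an assumption here, only the ops and their i915 backing functions come from this diff.

#include <drm/intel/display_parent_interface.h>

/*
 * Sketch only: take and release a runtime PM reference via the parent
 * interface. The "rpm" parameter stands in for however the display core
 * keeps the interface pointer.
 */
static void example_with_rpm(struct drm_device *drm,
			     const struct intel_display_rpm_interface *rpm)
{
	struct ref_tracker *wakeref;

	wakeref = rpm->get(drm);	/* i915 backend: intel_runtime_pm_get() */

	/* ... touch hardware that requires the device to be awake ... */

	rpm->put(drm, wakeref);		/* i915 backend: intel_runtime_pm_put() */
}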
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8cb59f8d1f4c..aba90b80854f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -35,6 +35,7 @@
#include "i915_reg.h"
#include "i915_vgpu.h"
#include "i915_wait_util.h"
+#include "i915_mmio_range.h"
#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -999,7 +1000,7 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
* scanned for obvious mistakes or typos by the selftests.
*/
-static const struct i915_range gen8_shadowed_regs[] = {
+static const struct i915_mmio_range gen8_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0xA008, .end = 0xA00C },
{ .start = 0x12030, .end = 0x12030 },
@@ -1007,7 +1008,7 @@ static const struct i915_range gen8_shadowed_regs[] = {
{ .start = 0x22030, .end = 0x22030 },
};
-static const struct i915_range gen11_shadowed_regs[] = {
+static const struct i915_mmio_range gen11_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2550, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1034,7 +1035,7 @@ static const struct i915_range gen11_shadowed_regs[] = {
{ .start = 0x1D8510, .end = 0x1D8550 },
};
-static const struct i915_range gen12_shadowed_regs[] = {
+static const struct i915_mmio_range gen12_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1078,7 +1079,7 @@ static const struct i915_range gen12_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range dg2_shadowed_regs[] = {
+static const struct i915_mmio_range dg2_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1117,7 +1118,7 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range mtl_shadowed_regs[] = {
+static const struct i915_mmio_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1135,7 +1136,7 @@ static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x22510, .end = 0x22550 },
};
-static const struct i915_range xelpmp_shadowed_regs[] = {
+static const struct i915_mmio_range xelpmp_shadowed_regs[] = {
{ .start = 0x1C0030, .end = 0x1C0030 },
{ .start = 0x1C0510, .end = 0x1C0550 },
{ .start = 0x1C8030, .end = 0x1C8030 },
@@ -1156,7 +1157,7 @@ static const struct i915_range xelpmp_shadowed_regs[] = {
{ .start = 0x38CFD4, .end = 0x38CFDC },
};
-static int mmio_range_cmp(u32 key, const struct i915_range *range)
+static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
{
if (key < range->start)
return -1;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 6048b99b96cb..fafc2ca9a237 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -123,12 +123,6 @@ struct intel_forcewake_range {
enum forcewake_domains domains;
};
-/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
-struct i915_range {
- u32 start;
- u32 end;
-};
-
struct intel_uncore {
void __iomem *regs;
@@ -162,7 +156,7 @@ struct intel_uncore {
* Shadowed registers are special cases where we can safely write
* to the register *without* grabbing forcewake.
*/
- const struct i915_range *shadowed_reg_table;
+ const struct i915_mmio_range *shadowed_reg_table;
unsigned int shadowed_reg_table_entries;
struct notifier_block pmic_bus_access_nb;
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 9c276c9d0a75..8460f0a70d04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -30,6 +30,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
#include "i915_wait_util.h"
#include "igt_flush_test.h"
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 58bcbdcef563..507bf42a1aaf 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -64,7 +64,7 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
struct {
- const struct i915_range *regs;
+ const struct i915_mmio_range *regs;
unsigned int size;
} range_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
@@ -74,7 +74,7 @@ static int intel_shadow_table_check(void)
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
- const struct i915_range *range;
+ const struct i915_mmio_range *range;
unsigned int i, j;
s32 prev;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fb8751bd5df0..b59626c4994c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -33,6 +33,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "i915_driver.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -183,7 +184,8 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- display = intel_display_device_probe(pdev);
+ /* FIXME: Can we run selftests using a mock device without display? */
+ display = intel_display_device_probe(pdev, i915_driver_parent_interface());
if (IS_ERR(display))
goto err_device;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index edffaed8f9a7..8841cfe1cac8 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -335,7 +335,7 @@ static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
/* Convert total Gb to Gb per DRAM device */
- return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+ return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) >= 16;
}
static void
@@ -354,7 +354,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
}
drm_dbg_kms(&i915->drm,
- "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
str_yes_no(skl_is_16gb_dimm(dimm)));
}
@@ -384,7 +384,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915,
ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
- drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
@@ -406,7 +406,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
u32 val;
int ret;
- /* Assume 16Gb DIMMs are present until proven otherwise */
+ /* Assume 16Gb+ DIMMs are present until proven otherwise */
dram_info->has_16gb_dimms = true;
val = intel_uncore_read(&i915->uncore,
@@ -438,7 +438,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
- drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n",
+ drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n",
str_yes_no(dram_info->has_16gb_dimms));
return 0;
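
The skl_is_16gb_dimm() change above divides the DIMM's total capacity by its DRAM device count and now matches 16 Gb or more per device; a standalone sketch of that arithmetic, with illustrative values:

/*
 * Standalone sketch of the per-device capacity check: total DIMM
 * capacity in Gb divided by the number of DRAM devices, matching when
 * each device is 16 Gb or larger. Values below are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_16gb_plus_dimm(unsigned int total_gb, unsigned int num_devices)
{
	/* Mirror the "?: 1" fallback above for an unknown device count. */
	if (!num_devices)
		num_devices = 1;

	return total_gb / num_devices >= 16;
}

int main(void)
{
	/* 32 Gb DIMM built from two 16 Gb devices: matches. */
	printf("%d\n", is_16gb_plus_dimm(32, 2));
	/* 16 Gb DIMM built from four 4 Gb devices: does not. */
	printf("%d\n", is_16gb_plus_dimm(16, 4));
	return 0;
}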
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3fbec058facc..f8a3a1bfe42e 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -205,7 +205,6 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/ext/i915_irq.o \
- display/ext/i915_utils.o \
display/intel_bo.o \
display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
@@ -218,6 +217,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_hdcp_gsc.o \
display/xe_panic.o \
display/xe_plane_initial.o \
+ display/xe_stolen.o \
display/xe_tdf.o
# SOC code shared with i915
@@ -234,6 +234,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
+ i915-display/intel_casf.o \
i915-display/intel_cdclk.o \
i915-display/intel_cmtg.o \
i915-display/intel_color.o \
@@ -243,6 +244,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_crtc_state_dump.o \
i915-display/intel_cursor.o \
i915-display/intel_cx0_phy.o \
+ i915-display/intel_dbuf_bw.o \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
@@ -254,7 +256,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_power.o \
i915-display/intel_display_power_map.o \
i915-display/intel_display_power_well.o \
+ i915-display/intel_display_rpm.o \
i915-display/intel_display_trace.o \
+ i915-display/intel_display_utils.o \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
@@ -291,6 +295,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_hti.o \
i915-display/intel_link_bw.o \
i915-display/intel_lspcon.o \
+ i915-display/intel_lt_phy.o \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
@@ -311,6 +316,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
i915-display/intel_wm.o \
+ i915-display/skl_prefill.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
i915-display/skl_watermark.o
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 8a048980ea38..0548b2e0316f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -5,10 +5,8 @@
#define __I915_GEM_OBJECT_H__
struct dma_fence;
-struct i915_sched_attr;
-static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
{
}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index f097fc6d5127..48e3256ba37e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -6,80 +6,35 @@
#ifndef _I915_GEM_STOLEN_H_
#define _I915_GEM_STOLEN_H_
-#include "xe_ttm_stolen_mgr.h"
-#include "xe_res_cursor.h"
-#include "xe_validation.h"
-
-struct xe_bo;
-
-struct i915_stolen_fb {
- struct xe_bo *bo;
-};
-
-static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align,
- u32 start, u32 end)
-{
- struct xe_bo *bo;
- int err = 0;
- u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
-
- if (start < SZ_4K)
- start = SZ_4K;
-
- if (align) {
- size = ALIGN(size, align);
- start = ALIGN(start, align);
- }
-
- bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
- size, start, end, ttm_bo_type_kernel, flags);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- bo = NULL;
- return err;
- }
-
- fb->bo = bo;
-
- return err;
-}
-
-static inline int i915_gem_stolen_insert_node(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align)
-{
- /* Not used on xe */
- BUG_ON(1);
- return -ENODEV;
-}
-
-static inline void i915_gem_stolen_remove_node(struct xe_device *xe,
- struct i915_stolen_fb *fb)
-{
- xe_bo_unpin_map_no_vm(fb->bo);
- fb->bo = NULL;
-}
-
-#define i915_gem_stolen_initialized(xe) (!!ttm_manager_type(&(xe)->ttm, XE_PL_STOLEN))
-#define i915_gem_stolen_node_allocated(fb) (!!((fb)->bo))
-
-static inline u32 i915_gem_stolen_node_offset(struct i915_stolen_fb *fb)
-{
- struct xe_res_cursor res;
-
- xe_res_first(fb->bo->ttm.resource, 0, 4096, &res);
- return res.start;
-}
-
-/* Used for < gen4. These are not supported by Xe */
-#define i915_gem_stolen_area_address(xe) (!WARN_ON(1))
-/* Used for gen9 specific WA. Gen9 is not supported by Xe */
-#define i915_gem_stolen_area_size(xe) (!WARN_ON(1))
-
-#define i915_gem_stolen_node_address(xe, fb) (xe_ttm_stolen_gpu_offset(xe) + \
- i915_gem_stolen_node_offset(fb))
-#define i915_gem_stolen_node_size(fb) ((u64)((fb)->bo->ttm.base.size))
+#include <linux/types.h>
+
+struct drm_device;
+struct intel_stolen_node;
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end);
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int align);
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
+
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif
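
A hedged usage sketch of the opaque stolen-node interface declared above; only the function signatures come from this header, while the caller context, range, alignment and error handling are illustrative:

/*
 * Usage sketch only: drive the opaque stolen-node interface from a
 * display-side caller. Signatures match the declarations above; the
 * range and alignment choices are placeholders.
 */
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/sizes.h>

#include "gem/i915_gem_stolen.h"

static int example_stolen_alloc(struct drm_device *drm, u64 size)
{
	struct intel_stolen_node *node;
	int ret;

	if (!i915_gem_stolen_initialized(drm))
		return -ENODEV;

	node = i915_gem_stolen_node_alloc(drm);
	if (!node)
		return -ENOMEM;

	/* Anywhere in stolen, 4KiB aligned. */
	ret = i915_gem_stolen_insert_node_in_range(node, size, SZ_4K, 0, U64_MAX);
	if (ret) {
		i915_gem_stolen_node_free(node);
		return ret;
	}

	/* ... use i915_gem_stolen_node_offset()/_address() while allocated ... */

	i915_gem_stolen_remove_node(node);
	i915_gem_stolen_node_free(node);

	return 0;
}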
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index b8269391bc69..3e79a74ff7de 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
-#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -35,7 +34,4 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
deleted file mode 100644
index c11130440d31..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/* Copyright © 2025 Intel Corporation */
-
-#ifndef __I915_SCHEDULER_TYPES_H__
-#define __I915_SCHEDULER_TYPES_H__
-
-#define I915_PRIORITY_DISPLAY 0
-
-struct i915_sched_attr {
- int priority;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
index 1d7c4360e5c0..bcd441dc0fce 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
@@ -3,4 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/i915_utils.h"
+/* for soc/ */
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+/* for a couple of users under i915/display */
+#define i915_inject_probe_failure(unused) ((unused) && 0)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
deleted file mode 100644
index 1421c2a7b64d..000000000000
--- a/drivers/gpu/drm/xe/display/ext/i915_utils.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-
-bool i915_vtd_active(struct drm_i915_private *i915)
-{
- if (device_iommu_mapped(i915->drm.dev))
- return true;
-
- /* Running as a guest, we assume the host is enforcing VT'd */
- return i915_run_as_guest();
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-/* i915 specific, just put here for shutting it up */
-int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index ebdb22c9499d..db8b1a27b4de 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -24,8 +24,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
xe_bo_put(bo);
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index af8139d00161..7ad76022cb14 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -3,11 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_ttm_stolen_mgr.h"
@@ -15,30 +12,22 @@
#include <generated/xe_device_wa_oob.h>
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+/*
+ * FIXME: There shouldn't be any reason to have XE_PAGE_SIZE stride
+ * alignment. The same 64 as i915 uses should be fine, and we shouldn't need to
+ * have driver specific values. However, dropping the stride alignment to 64
+ * leads to underflowing the bo pin count in the atomic cleanup work.
+ */
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct xe_device *xe = to_xe_device(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct xe_bo *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
+ return ALIGN(stride, XE_PAGE_SIZE);
+}
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_bo *obj;
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
@@ -62,33 +51,22 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj)) {
drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj);
- fb = ERR_PTR(-ENOMEM);
- goto err;
- }
-
- fb = intel_framebuffer_create(&obj->ttm.base,
- drm_get_format_info(dev,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- if (IS_ERR(fb)) {
- xe_bo_unpin_map_no_vm(obj);
- goto err;
+ return ERR_PTR(-ENOMEM);
}
- drm_gem_object_put(&obj->ttm.base);
-
- return to_intel_framebuffer(fb);
+ return &obj->ttm.base;
+}
-err:
- return ERR_CAST(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ xe_bo_unpin_map_no_vm(gem_to_xe_bo(obj));
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ struct pci_dev *pdev = to_pci_dev(drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 083c6904f8f1..8b0afa270216 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -13,6 +13,8 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
@@ -33,8 +35,12 @@
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
+#include "xe_display_rpm.h"
#include "xe_module.h"
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
+
/* Xe device functions */
/**
@@ -510,6 +516,10 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &xe_display_rpm_interface,
+};
+
/**
* xe_display_probe - probe display and create display struct
* @xe: XE device instance
@@ -530,7 +540,7 @@ int xe_display_probe(struct xe_device *xe)
if (!xe->info.probe_display)
goto no_display;
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return PTR_ERR(display);
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
index 3825376e98cc..340f65884812 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rpm.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -1,73 +1,74 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pm.h"
-static struct xe_device *display_to_xe(struct intel_display *display)
-{
- return to_xe_device(display->drm);
-}
-
-struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get(const struct drm_device *drm)
{
- return intel_display_rpm_get(display);
+ return xe_pm_runtime_resume_and_get(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+static struct ref_tracker *xe_display_rpm_get_if_in_use(const struct drm_device *drm)
{
- intel_display_rpm_put(display, wakeref);
+ return xe_pm_runtime_get_if_in_use(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get_noresume(const struct drm_device *drm)
{
- return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
-{
- return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
-{
- xe_pm_runtime_get_noresume(display_to_xe(display));
+ xe_pm_runtime_get_noresume(to_xe_device(drm));
return INTEL_WAKEREF_DEF;
}
-void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+static void xe_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
{
if (wakeref)
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-void intel_display_rpm_put_unchecked(struct intel_display *display)
+static void xe_display_rpm_put_unchecked(const struct drm_device *drm)
{
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-bool intel_display_rpm_suspended(struct intel_display *display)
+static bool xe_display_rpm_suspended(const struct drm_device *drm)
{
- struct xe_device *xe = display_to_xe(display);
+ struct xe_device *xe = to_xe_device(drm);
return pm_runtime_suspended(xe->drm.dev);
}
-void assert_display_rpm_held(struct intel_display *display)
+static void xe_display_rpm_assert_held(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_block(struct intel_display *display)
+static void xe_display_rpm_assert_block(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_unblock(struct intel_display *display)
+static void xe_display_rpm_assert_unblock(const struct drm_device *drm)
{
/* FIXME */
}
+
+const struct intel_display_rpm_interface xe_display_rpm_interface = {
+ .get = xe_display_rpm_get,
+ .get_raw = xe_display_rpm_get,
+ .get_if_in_use = xe_display_rpm_get_if_in_use,
+ .get_noresume = xe_display_rpm_get_noresume,
+ .put = xe_display_rpm_put,
+ .put_raw = xe_display_rpm_put,
+ .put_unchecked = xe_display_rpm_put_unchecked,
+ .suspended = xe_display_rpm_suspended,
+ .assert_held = xe_display_rpm_assert_held,
+ .assert_block = xe_display_rpm_assert_block,
+ .assert_unblock = xe_display_rpm_assert_unblock
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.h b/drivers/gpu/drm/xe/display/xe_display_rpm.h
new file mode 100644
index 000000000000..0bf9d31e87c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DISPLAY_RPM_H_
+#define _XE_DISPLAY_RPM_H_
+
+extern const struct intel_display_rpm_interface xe_display_rpm_interface;
+
+#endif /* _XE_DISPLAY_RPM_H_ */
diff --git a/drivers/gpu/drm/xe/display/xe_stolen.c b/drivers/gpu/drm/xe/display/xe_stolen.c
new file mode 100644
index 000000000000..9f04ba36e930
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_stolen.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "gem/i915_gem_stolen.h"
+#include "xe_res_cursor.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_validation.h"
+
+struct intel_stolen_node {
+ struct xe_device *xe;
+ struct xe_bo *bo;
+};
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end)
+{
+ struct xe_device *xe = node->xe;
+
+ struct xe_bo *bo;
+ int err = 0;
+ u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
+
+ if (start < SZ_4K)
+ start = SZ_4K;
+
+ if (align) {
+ size = ALIGN(size, align);
+ start = ALIGN(start, align);
+ }
+
+ bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
+ size, start, end, ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ bo = NULL;
+ return err;
+ }
+
+ node->bo = bo;
+
+ return err;
+}
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, unsigned int align)
+{
+ /* Not used on xe */
+ WARN_ON(1);
+
+ return -ENODEV;
+}
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ xe_bo_unpin_map_no_vm(node->bo);
+ node->bo = NULL;
+}
+
+bool i915_gem_stolen_initialized(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+
+ return ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+}
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return node->bo;
+}
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node)
+{
+ struct xe_res_cursor res;
+
+ xe_res_first(node->bo->ttm.resource, 0, 4096, &res);
+ return res.start;
+}
+
+/* Used for < gen4. These are not supported by Xe */
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+/* Used for gen9 specific WA. Gen9 is not supported by Xe */
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node)
+{
+ struct xe_device *xe = node->xe;
+
+ return xe_ttm_stolen_gpu_offset(xe) + i915_gem_stolen_node_offset(node);
+}
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
+{
+ return node->bo->ttm.base.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->xe = xe;
+
+ return node;
+}
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
+{
+ kfree(node);
+}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index dc17f63f9353..af0ce275b032 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -228,6 +228,11 @@ struct xe_device {
/** @drm: drm device */
struct drm_device drm;
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+ /** @display: display device data, must be placed after drm device member */
+ struct intel_display *display;
+#endif
+
/** @devcoredump: device coredump */
struct xe_devcoredump devcoredump;
@@ -632,8 +637,6 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display *display;
-
const struct dram_info *dram_info;
/*
@@ -645,18 +648,9 @@ struct xe_device {
/* To shut up runtime pm macros.. */
struct xe_runtime_pm {} runtime_pm;
- /* only to allow build, not used functionally */
- u32 irq_mask;
-
struct intel_uncore {
spinlock_t lock;
} uncore;
-
- /* only to allow build, not used functionally */
- struct {
- unsigned int hpll_freq;
- unsigned int czclk_freq;
- };
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 6e59642e7820..90e2ee5e9270 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -442,6 +442,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc),
INTEL_CRI_IDS(INTEL_PCI_DEVICE, &cri_desc),
+ INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index cf318e3ddb5c..e4eebabab975 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -258,6 +258,8 @@
# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT 3 /* DP 2.1a, in units of 2 MPixels/sec */
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK (0x1f << DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT)
#define DP_DSC_RC_BUF_SIZE 0x063
@@ -1686,6 +1688,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
+#define DP_DSC_BRANCH_CAP_SIZE 3
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 5
#define DP_LTTPR_COMMON_CAP_SIZE 8
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 52ce28097015..df2f24b950e4 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -211,6 +211,11 @@ u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
u8 dsc_bpc[3]);
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]);
static inline bool
drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -828,6 +833,15 @@ enum drm_dp_quirk {
* requires enabling DSC.
*/
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
+ /**
+ * @DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT:
+ *
+ * The device doesn't support DSC decompression at the maximum DSC
+ * pixel throughput and compressed bpp it indicates via its DPCD DSC
+ * capabilities. The compressed bpp must be limited above a device
+ * specific DSC pixel throughput.
+ */
+ DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT,
};
/**
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 2d2a0bd526cf..66278ffeebd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -318,6 +318,17 @@ struct drm_crtc_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @sharpness_strength:
+ *
+ * Used by the user to set the sharpness intensity.
+	 * The value ranges from 0 to 255.
+	 * Default value is 0, which disables the sharpness feature.
+ * Any value greater than 0 enables sharpening with the
+ * specified strength.
+ */
+ u8 sharpness_strength;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1089,6 +1100,12 @@ struct drm_crtc {
struct drm_property *scaling_filter_property;
/**
+	 * @sharpness_strength_property: property to apply
+	 * the requested sharpness intensity.
+ */
+ struct drm_property *sharpness_strength_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1324,4 +1341,5 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters);
bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc);
#endif /* __DRM_CRTC_H__ */
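
A hedged sketch of the driver-facing side of this property: a CRTC exposes it via drm_crtc_create_sharpness_strength_property() and then consumes the committed value from its CRTC state. The function name and hardware step below are placeholders; only the 0 = disabled semantics come from the documentation above:

/*
 * Sketch only: consume the committed sharpness_strength from the CRTC
 * state. The function name and the hardware programming step are
 * placeholders.
 */
#include <drm/drm_crtc.h>

static void example_program_sharpness(const struct drm_crtc_state *crtc_state)
{
	if (!crtc_state->sharpness_strength)
		return; /* 0 leaves the sharpness filter disabled */

	/*
	 * Any value in 1-255 enables sharpening at that strength:
	 * ... program the hardware with crtc_state->sharpness_strength ...
	 */
}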
diff --git a/include/drm/intel/display_member.h b/include/drm/intel/display_member.h
new file mode 100644
index 000000000000..0319ea560b60
--- /dev/null
+++ b/include/drm/intel/display_member.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DRM_INTEL_DISPLAY_H__
+#define __DRM_INTEL_DISPLAY_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+#include <drm/drm_device.h>
+
+struct intel_display;
+
+/*
+ * A dummy device struct to define the relative offsets of drm and display
+ * members. With the members identically placed in struct drm_i915_private and
+ * struct xe_device, this allows figuring out the struct intel_display pointer
+ * without the definition of either driver specific structure.
+ */
+struct __intel_generic_device {
+ struct drm_device drm;
+ struct intel_display *display;
+};
+
+/**
+ * INTEL_DISPLAY_MEMBER_STATIC_ASSERT() - ensure correct placing of drm and display members
+ * @type: The struct to check
+ * @drm_member: Name of the struct drm_device member
+ * @display_member: Name of the struct intel_display * member.
+ *
+ * Use this static assert macro to ensure that the struct drm_device and struct
+ * intel_display * members of struct drm_i915_private and struct xe_device are
+ * at the same relative offsets.
+ */
+#define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
+ static_assert( \
+ offsetof(struct __intel_generic_device, display) - offsetof(struct __intel_generic_device, drm) == \
+ offsetof(type, display_member) - offsetof(type, drm_member), \
+ __stringify(type) " " __stringify(drm_member) " and " __stringify(display_member) " members at invalid offsets")
+
+#endif
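
A hedged sketch of the driver-agnostic pointer chase this layout rule enables: with the drm and display members placed identically in both drivers, a drm_device pointer can be cast through the dummy struct. The helper name is an assumption for illustration:

/*
 * Sketch only: the kind of driver-agnostic lookup the layout rule
 * above enables. The helper name is an assumption; the arithmetic
 * relies solely on the drm and display members being placed as
 * asserted.
 */
#include <linux/container_of.h>

#include <drm/intel/display_member.h>

static inline struct intel_display *example_drm_to_display(struct drm_device *drm)
{
	struct __intel_generic_device *dev =
		container_of(drm, struct __intel_generic_device, drm);

	return dev->display;
}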
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
new file mode 100644
index 000000000000..26bedc360044
--- /dev/null
+++ b/include/drm/intel/display_parent_interface.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DISPLAY_PARENT_INTERFACE_H__
+#define __DISPLAY_PARENT_INTERFACE_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct ref_tracker;
+
+struct intel_display_rpm_interface {
+ struct ref_tracker *(*get)(const struct drm_device *drm);
+ struct ref_tracker *(*get_raw)(const struct drm_device *drm);
+ struct ref_tracker *(*get_if_in_use)(const struct drm_device *drm);
+ struct ref_tracker *(*get_noresume)(const struct drm_device *drm);
+
+ void (*put)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_raw)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_unchecked)(const struct drm_device *drm);
+
+ bool (*suspended)(const struct drm_device *drm);
+ void (*assert_held)(const struct drm_device *drm);
+ void (*assert_block)(const struct drm_device *drm);
+ void (*assert_unblock)(const struct drm_device *drm);
+};
+
+/**
+ * struct intel_display_parent_interface - services parent driver provides to display
+ *
+ * The parent, or core, driver provides a pointer to this structure to the
+ * display driver when calling intel_display_device_probe(). The display driver
+ * uses it to access services provided by the parent driver. The structure may
+ * contain sub-struct pointers to group function pointers by functionality.
+ *
+ * All function and sub-struct pointers must be initialized and callable unless
+ * explicitly marked as "optional" below. The display driver will only NULL
+ * check the optional pointers.
+ */
+struct intel_display_parent_interface {
+ /** @rpm: Runtime PM functions */
+ const struct intel_display_rpm_interface *rpm;
+};
+
+#endif
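
A hedged sketch of the consumer side: display code holding a pointer to this interface calls runtime PM through the vtable. How the pointer is stored on the display side and the wrapper names below are assumptions:

/*
 * Sketch only: call runtime PM through the parent-provided vtable. The
 * container struct and wrapper names are assumptions for illustration.
 */
#include <drm/intel/display_parent_interface.h>

struct example_display {
	struct drm_device *drm;
	const struct intel_display_parent_interface *parent;
};

static struct ref_tracker *example_rpm_get(struct example_display *display)
{
	return display->parent->rpm->get(display->drm);
}

static void example_rpm_put(struct example_display *display,
			    struct ref_tracker *wakeref)
{
	display->parent->rpm->put(display->drm, wakeref);
}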
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 6e53fb4cdd37..b258e79b437a 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -880,7 +880,10 @@
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0B0, ## __VA_ARGS__), \
+ MACRO__(0xB0B0, ## __VA_ARGS__)
+
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
MACRO__(0xFD80, ## __VA_ARGS__), \
MACRO__(0xFD81, ## __VA_ARGS__)